repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
zfrenchee/pandas | pandas/tests/indexing/test_multiindex.py | 1 | 50115 |
from warnings import catch_warnings
import pytest
import numpy as np
import pandas as pd
from pandas import (Panel, Series, MultiIndex, DataFrame,
Timestamp, Index, date_range)
from pandas.util import testing as tm
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.tests.indexing.common import _mklbl
class TestMultiIndexBasic(object):
def test_iloc_getitem_multiindex2(self):
# TODO(wesm): fix this
pytest.skip('this test was being suppressed, '
'needs to be fixed')
arr = np.random.randn(3, 3)
df = DataFrame(arr, columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
rs = df.iloc[2]
xp = Series(arr[2], index=df.columns)
tm.assert_series_equal(rs, xp)
rs = df.iloc[:, 2]
xp = Series(arr[:, 2], index=df.index)
tm.assert_series_equal(rs, xp)
rs = df.iloc[2, 2]
xp = df.values[2, 2]
assert rs == xp
# for multiple items
# GH 5528
rs = df.iloc[[0, 1]]
xp = df.xs(4, drop_level=False)
tm.assert_frame_equal(rs, xp)
tup = zip(*[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
index = MultiIndex.from_tuples(tup)
df = DataFrame(np.random.randn(4, 4), index=index)
rs = df.iloc[[2, 3]]
xp = df.xs('b', drop_level=False)
tm.assert_frame_equal(rs, xp)
def test_setitem_multiindex(self):
with catch_warnings(record=True):
for index_fn in ('ix', 'loc'):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=assert_equal)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=assert_equal)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=assert_equal,
expected=3, )
# GH5206
df = DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = MultiIndex.from_product([
['A', 'B', 'C'],
date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = MultiIndex.from_product([
['foo', 'bar'],
date_range('2016-01-01', '2016-02-01', freq='MS')])
df = DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = MultiIndex.from_tuples(
[('A', Timestamp('2015-01-01')),
('A', Timestamp('2015-02-01'))])
subcols = MultiIndex.from_tuples(
[('foo', Timestamp('2016-01-01')),
('foo', Timestamp('2016-02-01'))])
vals = DataFrame(np.random.random((2, 2)),
index=subidx, columns=subcols)
check(target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal, )
# set all columns
vals = DataFrame(
np.random.random((2, 4)), index=subidx, columns=cols)
check(target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal, )
# identity
copy = df.copy()
check(target=df, indexers=(df.index, df.columns), value=df,
compare_fn=tm.assert_frame_equal, expected=copy)
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j', 'k'],
['X', 'X', 'Y', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_int.iloc[0]
with catch_warnings(record=True):
xp = mi_int.ix[4].ix[8]
tm.assert_series_equal(rs, xp, check_names=False)
assert rs.name == (4, 8)
assert xp.name == 8
# 2nd (last) columns
rs = mi_int.iloc[:, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2]
tm.assert_series_equal(rs, xp)
# corner column
rs = mi_int.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2].ix[2]
assert rs == xp
# this is basically regular indexing
rs = mi_labels.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j'].ix[0, 0]
assert rs == xp
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
def test_getitem_partial_int(self):
# GH 12416
# with single item
l1 = [10, 20]
l2 = ['a', 'b']
df = DataFrame(index=range(2),
columns=MultiIndex.from_product([l1, l2]))
expected = DataFrame(index=range(2),
columns=l2)
result = df[20]
tm.assert_frame_equal(result, expected)
# with list
expected = DataFrame(index=range(2),
columns=MultiIndex.from_product([l1[1:], l2]))
result = df[[20]]
tm.assert_frame_equal(result, expected)
# missing item:
with tm.assert_raises_regex(KeyError, '1'):
df[1]
with tm.assert_raises_regex(KeyError, r"'\[1\] not in index'"):
df[[1]]
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1, 1),
index=MultiIndex.from_product([[1, 2, 3, 4],
[1, 2, 3]]))
expected = df.loc[([1, 2], ), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
# multiindexer getitem with list of indexers skips wrong element
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, 'a':'c']
tm.assert_series_equal(result, expected)
def test_multiindex_perf_warn(self):
df = DataFrame({'jim': [0, 0, 1, 1],
'joe': ['x', 'x', 'z', 'y'],
'jolie': np.random.rand(4)}).set_index(['jim', 'joe'])
with tm.assert_produces_warning(PerformanceWarning,
clear=[pd.core.index]):
df.loc[(1, 'z')]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0, )]
def test_series_getitem_multiindex(self):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = s[:, 0]
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.loc[:, 1]
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# xs
result = s.xs(0, level=0)
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.xs(1, level=1)
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# GH6258
dt = list(date_range('20130903', periods=3))
idx = MultiIndex.from_product([list('AB'), dt])
s = Series([1, 3, 4, 1, 3, 4], index=idx)
result = s.xs('20130903', level=1)
expected = Series([1, 1], index=list('AB'))
tm.assert_series_equal(result, expected)
# GH5684
idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one'),
('b', 'two')])
s = Series([1, 2, 3, 4], index=idx)
s.index.set_names(['L1', 'L2'], inplace=True)
result = s.xs('one', level='L2')
expected = Series([1, 3], index=['a', 'b'])
expected.index.set_names(['L1'], inplace=True)
tm.assert_series_equal(result, expected)
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples(
[('a', 'foo'), ('a', 'bar'), ('b', 'hello'),
('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sort_index(axis=1, inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:, 0:2].loc[:, 'a']
tm.assert_frame_equal(result, expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
tm.assert_frame_equal(result, expected)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = DataFrame(np.random.randn(6, 3), index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']] * 2
df = df_orig.copy()
df.loc[['bar']] *= 2
tm.assert_frame_equal(df.loc[['bar']], expected)
# raise because these have differing levels
def f():
df.loc['bar'] *= 2
pytest.raises(TypeError, f)
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0
}})
df_orig.index = MultiIndex.from_tuples(df_orig.index,
names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], 'price'] *= 2
tm.assert_frame_equal(df, expected)
def test_getitem_duplicates_multiindex(self):
# GH 5725 the 'A' happens to be a valid Timestamp so it doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['D']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['A']
pytest.raises(KeyError, f)
def f():
df.val['X']
pytest.raises(KeyError, f)
# A is treated as a special Timestamp
index = MultiIndex(levels=[['A', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['A']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['X']
pytest.raises(KeyError, f)
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
result = s.loc[['A']]
tm.assert_series_equal(result, expected)
result = s.loc[['A', 'D']]
tm.assert_series_equal(result, expected)
# not any values found
pytest.raises(KeyError, lambda: s.loc[['D']])
# empty ok
result = s.loc[[]]
expected = s.iloc[[]]
tm.assert_series_equal(result, expected)
idx = pd.IndexSlice
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
result = s.loc[idx[:, ['foo']]]
tm.assert_series_equal(result, expected)
result = s.loc[idx[:, ['foo', 'bah']]]
tm.assert_series_equal(result, expected)
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(
np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5),
columns=multi_index.reindex([])[0])
result1 = df.loc[:, ([], slice(None))]
result2 = df.loc[:, (['foo'], [])]
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
assert result == np.mean
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df['d'] = np.nan
arr = np.array([0., 1.])
with catch_warnings(record=True):
df.ix[4, 'd'] = arr
tm.assert_series_equal(df.ix[4, 'd'],
Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
with catch_warnings(record=True):
df.ix[4, 'c'] = arr
exp = Series(arr, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
with catch_warnings(record=True):
df.ix[4, 'c'] = 10
exp = Series(10, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
def f():
with catch_warnings(record=True):
df.ix[4, 'c'] = [0, 1, 2, 3]
pytest.raises(ValueError, f)
def f():
with catch_warnings(record=True):
df.ix[4, 'c'] = [0]
pytest.raises(ValueError, f)
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A' + num for num in
map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]),
name=df2.index.values[0]).reindex(f_index)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
with catch_warnings(record=True):
df.ix[name, 'new_col'] = new_vals
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(np.arange(20),
MultiIndex.from_product([list('abcde'), np.arange(4)]))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
with catch_warnings(record=True):
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
assert_slices_equivalent(SLC['d'::-1], SLC[15::-1])
assert_slices_equivalent(SLC[('d', )::-1], SLC[15::-1])
assert_slices_equivalent(SLC[:'d':-1], SLC[:11:-1])
assert_slices_equivalent(SLC[:('d', ):-1], SLC[:11:-1])
assert_slices_equivalent(SLC['d':'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['d':('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['b':'d':-1], SLC[:0])
assert_slices_equivalent(SLC[('c', 2)::-1], SLC[10::-1])
assert_slices_equivalent(SLC[:('c', 2):-1], SLC[:9:-1])
assert_slices_equivalent(SLC[('e', 0):('c', 2):-1], SLC[16:9:-1])
def test_multiindex_slice_first_level(self):
# GH 12697
freq = ['a', 'b', 'c', 'd']
idx = MultiIndex.from_product([freq, np.arange(500)])
df = DataFrame(list(range(2000)), index=idx, columns=['Test'])
df_slice = df.loc[pd.IndexSlice[:, 30:70], :]
result = df_slice.loc['a']
expected = DataFrame(list(range(30, 71)),
columns=['Test'], index=range(30, 71))
tm.assert_frame_equal(result, expected)
result = df_slice.loc['d']
expected = DataFrame(list(range(1530, 1571)),
columns=['Test'], index=range(30, 71))
tm.assert_frame_equal(result, expected)
def test_multiindex_symmetric_difference(self):
# GH 13490
idx = MultiIndex.from_product([['a', 'b'], ['A', 'B']],
names=['a', 'b'])
result = idx ^ idx
assert result.names == idx.names
idx2 = idx.copy().rename(['A', 'B'])
result = idx ^ idx2
assert result.names == [None, None]
class TestMultiIndexSlicers(object):
def test_per_axis_per_level_getitem(self):
# GH6134
# example test case
ix = MultiIndex.from_product([_mklbl('A', 5), _mklbl('B', 7), _mklbl(
'C', 4), _mklbl('D', 2)])
df = DataFrame(np.arange(len(ix.get_values())), index=ix)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C2' or c == 'C3')]]
result = df.loc[(slice('A1', 'A3'), slice(None), slice('C1', 'C3')), :]
tm.assert_frame_equal(result, expected)
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df = df.sort_index(axis=0).sort_index(axis=1)
# identity
result = df.loc[(slice(None), slice(None)), :]
tm.assert_frame_equal(result, df)
result = df.loc[(slice(None), slice(None)), (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
result = df.loc[:, (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
# index
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), 1), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# columns
result = df.loc[:, (slice(None), ['foo'])]
expected = df.iloc[:, [1, 3]]
tm.assert_frame_equal(result, expected)
# both
result = df.loc[(slice(None), 1), (slice(None), ['foo'])]
expected = df.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc['A', 'a']
expected = DataFrame(dict(bar=[1, 5, 9], foo=[0, 4, 8]),
index=Index([1, 2, 3], name='two'),
columns=Index(['bar', 'foo'], name='lvl1'))
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), [1, 2]), :]
expected = df.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# multi-level series
s = Series(np.arange(len(ix.get_values())), index=ix)
result = s.loc['A1':'A3', :, ['C1', 'C3']]
expected = s.loc[[tuple([a, b, c, d])
for a, b, c, d in s.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_series_equal(result, expected)
# boolean indexers
result = df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
expected = df.iloc[[2, 3]]
tm.assert_frame_equal(result, expected)
def f():
df.loc[(slice(None), np.array([True, False])), :]
pytest.raises(ValueError, f)
# ambiguous cases
# these can be multiply interpreted (e.g. in this case
# as df.loc[slice(None), [1]] as well)
pytest.raises(KeyError, lambda: df.loc[slice(None), [1]])
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# not lexsorted
assert df.index.lexsort_depth == 2
df = df.sort_index(level=1, axis=0)
assert df.index.lexsort_depth == 0
with tm.assert_raises_regex(
UnsortedIndexError,
'MultiIndex slicing requires the index to be '
r'lexsorted: slicing on levels \[1\], lexsort depth 0'):
df.loc[(slice(None), slice('bar')), :]
# GH 16734: not sorted, but no real slicing
result = df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
tm.assert_frame_equal(result, df.iloc[[1, 3], :])
def test_multiindex_slicers_non_unique(self):
# GH 7106
# non-unique mi index support
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 3],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
assert not df.index.is_unique
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
tm.assert_frame_equal(result, expected)
# this is equivalent of an xs expression
result = df.xs(1, level=2, drop_level=False)
tm.assert_frame_equal(result, expected)
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 2],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
assert not df.index.is_unique
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
assert not result.index.is_unique
tm.assert_frame_equal(result, expected)
# GH12896
# numpy-implementation dependent bug
ints = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 16,
17, 18, 19, 200000, 200000]
n = len(ints)
idx = MultiIndex.from_arrays([['a'] * n, ints])
result = Series([1] * n, index=idx)
result = result.sort_index()
result = result.loc[(slice(None), slice(100000))]
expected = Series([1] * (n - 2), index=idx[:-2]).sort_index()
tm.assert_series_equal(result, expected)
def test_multiindex_slicers_datetimelike(self):
# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime
dates = [datetime.datetime(2012, 1, 1, 12, 12, 12) +
datetime.timedelta(days=i) for i in range(6)]
freq = [1, 2]
index = MultiIndex.from_product(
[dates, freq], names=['date', 'frequency'])
df = DataFrame(
np.arange(6 * 2 * 4, dtype='int64').reshape(
-1, 4), index=index, columns=list('ABCD'))
# multi-axis slicing
idx = pd.IndexSlice
expected = df.iloc[[0, 2, 4], [0, 1]]
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx[Timestamp('2012-01-01 12:12:12'):Timestamp(
'2012-01-03 12:12:12')], idx[1:1]), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')), 1),
slice('A', 'B')]
tm.assert_frame_equal(result, expected)
# with strings
result = df.loc[(slice('2012-01-01 12:12:12', '2012-01-03 12:12:12'),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx['2012-01-01 12:12:12':'2012-01-03 12:12:12'], 1),
idx['A', 'B']]
tm.assert_frame_equal(result, expected)
def test_multiindex_slicers_edges(self):
# GH 8132
# various edge cases
df = DataFrame(
{'A': ['A0'] * 5 + ['A1'] * 5 + ['A2'] * 5,
'B': ['B0', 'B0', 'B1', 'B1', 'B2'] * 3,
'DATE': ["2013-06-11", "2013-07-02", "2013-07-09", "2013-07-30",
"2013-08-06", "2013-06-11", "2013-07-02", "2013-07-09",
"2013-07-30", "2013-08-06", "2013-09-03", "2013-10-01",
"2013-07-09", "2013-08-06", "2013-09-03"],
'VALUES': [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3, 4, 2]})
df['DATE'] = pd.to_datetime(df['DATE'])
df1 = df.set_index(['A', 'B', 'DATE'])
df1 = df1.sort_index()
# A1 - Get all values under "A0" and "A1"
result = df1.loc[(slice('A1')), :]
expected = df1.iloc[0:10]
tm.assert_frame_equal(result, expected)
# A2 - Get all values from the start to "A2"
result = df1.loc[(slice('A2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# A3 - Get all values under "B1" or "B2"
result = df1.loc[(slice(None), slice('B1', 'B2')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13, 14]]
tm.assert_frame_equal(result, expected)
# A4 - Get all values between 2013-07-02 and 2013-07-09
result = df1.loc[(slice(None), slice(None),
slice('20130702', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
# B1 - Get all values in B0 that are also under A0, A1 and A2
result = df1.loc[(slice('A2'), slice('B0')), :]
expected = df1.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for
# the As)
result = df1.loc[(slice(None), slice('B2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# B3 - Get all values from B1 to B2 and up to 2013-08-06
result = df1.loc[(slice(None), slice('B1', 'B2'),
slice('2013-08-06')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13]]
tm.assert_frame_equal(result, expected)
# B4 - Same as A4 but the start of the date slice is not a key.
# shows indexing on a partial selection slice
result = df1.loc[(slice(None), slice(None),
slice('20130701', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
def test_per_axis_per_level_doc_examples(self):
# test index maker
idx = pd.IndexSlice
# from indexing.rst / advanced
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index, columns=columns)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx['A1':'A3', :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx[:, :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
# not sorted
def f():
df.loc['A1', ('a', slice('foo'))]
pytest.raises(UnsortedIndexError, f)
# GH 16734: not sorted, but no real slicing
tm.assert_frame_equal(df.loc['A1', (slice(None), 'foo')],
df.loc['A1'].iloc[:, [0, 2]])
df = df.sort_index(axis=1)
# slicing
df.loc['A1', (slice(None), 'foo')]
df.loc[(slice(None), slice(None), ['C1', 'C3']), (slice(None), 'foo')]
# setitem
df.loc(axis=0)[:, :, ['C1', 'C3']] = -10
def test_loc_axis_arguments(self):
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index,
columns=columns).sort_index().sort_index(axis=1)
# axis 0
result = df.loc(axis=0)['A1':'A3', :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='index')[:, :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
# axis 1
result = df.loc(axis=1)[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='columns')[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
# invalid axis
def f():
df.loc(axis=-1)[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def f():
df.loc(axis=2)[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def f():
df.loc(axis='foo')[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def test_per_axis_per_level_setitem(self):
# test index maker
idx = pd.IndexSlice
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df_orig = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df_orig = df_orig.sort_index(axis=0).sort_index(axis=1)
# identity
df = df_orig.copy()
df.loc[(slice(None), slice(None)), :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), slice(None)), (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
# index
df = df_orig.copy()
df.loc[(slice(None), [1]), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, 1] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
# columns
df = df_orig.copy()
df.loc[:, (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[:, [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# both
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, 1], idx[:, ['foo']]] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc['A', 'a'] = 100
expected = df_orig.copy()
expected.iloc[0:3, 0:2] = 100
tm.assert_frame_equal(df, expected)
# setting with a list-like
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100, 100], [100, 100]], dtype='int64')
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# not enough values
df = df_orig.copy()
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100], [100, 100]], dtype='int64')
pytest.raises(ValueError, f)
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[100, 100, 100, 100], dtype='int64')
pytest.raises(ValueError, f)
# with an alignable rhs
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = df.loc[(slice(
None), 1), (slice(None), ['foo'])] * 5
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = expected.iloc[[0, 3], [1, 3]] * 5
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= df.loc[(slice(
None), 1), (slice(None), ['foo'])]
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
rhs = df_orig.loc[(slice(None), 1), (slice(None), ['foo'])].copy()
rhs.loc[:, ('c', 'bah')] = 10
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= rhs
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
class TestMultiIndexPanel(object):
def test_iloc_getitem_panel_multiindex(self):
with catch_warnings(record=True):
# GH 7199
# Panel with multi-index
multi_index = MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
tm.assert_frame_equal(result1, expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
tm.assert_frame_equal(result2, expected2)
expected1 = DataFrame(index=['a'], columns=multi_index,
dtype='float64')
result1 = wd1.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result1, expected1)
expected2 = DataFrame(index=['a'], columns=simple_index,
dtype='float64')
result2 = wd2.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result2, expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
items=['a', 'b', 'c'], major_axis=mi,
minor_axis=['u', 'v', 'w'])
result = p.iloc[:, 1, 0]
expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
tm.assert_series_equal(result, expected)
result = p.loc[:, (1, 'y'), 'u']
tm.assert_series_equal(result, expected)
def test_panel_setitem_with_multiindex(self):
with catch_warnings(record=True):
# 10360
# failing with a multi-index
arr = np.array([[[1, 2, 3], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
# reg index
axes = dict(items=['A', 'B'], major_axis=[0, 1],
minor_axis=['X', 'Y', 'Z'])
p1 = Panel(0., **axes)
p1.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p1, expected)
# multi-indexes
axes['items'] = MultiIndex.from_tuples(
[('A', 'a'), ('B', 'b')])
p2 = Panel(0., **axes)
p2.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p2, expected)
axes['major_axis'] = MultiIndex.from_tuples(
[('A', 1), ('A', 2)])
p3 = Panel(0., **axes)
p3.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p3, expected)
axes['minor_axis'] = MultiIndex.from_product(
[['X'], range(3)])
p4 = Panel(0., **axes)
p4.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p4, expected)
arr = np.array(
[[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
p5 = Panel(0., **axes)
p5.iloc[0, :, 0] = [1, 2]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p5, expected)
| bsd-3-clause |
gagneurlab/concise | concise/utils/pwm.py | 2 | 10205 |
import numpy as np
import copy
from concise.preprocessing.sequence import DNA, _get_vocab_dict
from io import StringIO
import gzip
from concise.utils.plot import seqlogo, seqlogo_fig
import matplotlib.pyplot as plt
DEFAULT_LETTER_TO_INDEX = _get_vocab_dict(DNA)
DEFAULT_INDEX_TO_LETTER = dict((DEFAULT_LETTER_TO_INDEX[x], x)
for x in DEFAULT_LETTER_TO_INDEX)
DEFAULT_BASE_BACKGROUND = {"A": .25, "C": .25, "G": .25, "T": .25}
class PWM(object):
"""Class holding the position-weight matrix (PWM)
# Arguments
pwm: PWM matrix of shape `(seq_len, 4)`. All elements need to be larger or equal to 0.
name: str, optional name argument
# Attributes
pwm: np.array of shape `(seq_len, 4)`. All rows sum to 1
name: PWM name
# Methods
- **plotPWM(figsize=(10, 2))** - Make a sequence logo plot from the pwm.
Letter height corresponds to the probability.
- **plotPWMInfo(figsize=(10, 2))** - Make the sequence logo plot with information content
corresponding to the letter height.
- **get_pssm(background_probs=DEFAULT_BASE_BACKGROUND)** - Get the position-specific scoring matrix (PSSM)
computed as `np.log(pwm / b)`, where b is the vector of background base probabilities.
- **plotPSSM(background_probs=DEFAULT_BASE_BACKGROUND, figsize=(10, 2))** - Make the sequence logo plot with
letter height corresponding to the position-specific scoring matrix (PSSM).
- **normalize()** - force all rows to sum to 1.
- **get_consensus()** - returns the consensus sequence
# Class methods
- **from_consensus(consensus_seq, background_proportion=0.1, name=None)** - Construct PWM from a consensus sequence
- **consensus_seq**: string representing the consensus sequence (ex: ACTGTAT)
- **background_proportion**: Let's denote it with a. The row in the resulting PWM
will be: `'C' -> [a/3, a/3, 1-a, a/3]`
- **name** - PWM.name.
- **from_background(length=9, name=None, probs=DEFAULT_BASE_BACKGROUND)** - Create a background PWM.
"""
letterToIndex = DEFAULT_LETTER_TO_INDEX
indexToLetter = DEFAULT_INDEX_TO_LETTER
def __init__(self, pwm, name=None):
self.pwm = np.asarray(pwm) # needs to be np.array
self.name = name
# if type(pwm).__module__ != np.__name__:
# raise Exception("pwm needs to by a numpy array")
if self.pwm.shape[1] != 4 and len(self.pwm.shape) == 2:
raise Exception("pwm needs to be n*4, n is pwm_lenght")
if np.any(self.pwm < 0):
raise Exception("All pwm elements need to be positive")
if not np.all(np.sum(self.pwm, axis=1) > 0):
raise Exception("All pwm rows need to have sum > 0")
# all elements need to be >0
assert np.all(self.pwm >= 0)
# normalize the pwm
self.normalize()
def normalize(self):
rows = np.sum(self.pwm, axis=1)
self.pwm = self.pwm / rows.reshape([-1, 1])
def get_consensus(self):
max_idx = self.pwm.argmax(axis=1)
return ''.join([self.indexToLetter[x] for x in max_idx])
def __repr__(self):
return self.__str__()
def __str__(self):
return "PWM(name: {0}, consensus: {1})".format(self.name, self.get_consensus())
@classmethod
def from_consensus(cls, consensus_seq, background_proportion=0.1, name=None):
pwm = np.zeros((len(consensus_seq), 4))
pwm += background_proportion / 3
for (i, l) in enumerate(consensus_seq):
b = cls.letterToIndex[l]
pwm[i, b] = 1 - background_proportion
return cls(pwm, name=name)
@classmethod
def _background_pwm(cls, length=9, probs=DEFAULT_BASE_BACKGROUND):
barr = background_probs2array(probs, indexToLetter=cls.indexToLetter)
pwm = np.array([barr for i in range(length)])
if length == 0:
pwm = pwm.reshape([0, 4])
return pwm
@classmethod
def from_background(cls, length=9, name=None, probs=DEFAULT_BASE_BACKGROUND):
return PWM(cls._background_pwm(length, probs),
name=name)
def _change_length(self, new_length, probs=DEFAULT_BASE_BACKGROUND):
length = self.pwm.shape[0]
len_diff = new_length - length
if (len_diff < 0):
# symmetrically remove
remove_start = abs(len_diff) // 2
remove_end = abs(len_diff) // 2 + abs(len_diff) % 2
self.pwm = self.pwm[remove_start:(length - remove_end), :]
if (len_diff > 0):
add_start = len_diff // 2 + len_diff % 2
add_end = len_diff // 2
# concatenate two arrays
pwm_start = self._background_pwm(add_start, probs=probs)
pwm_end = self._background_pwm(add_end, probs=probs)
self.pwm = np.concatenate([pwm_start, self.pwm, pwm_end], axis=0)
self.normalize()
return self
def get_config(self):
return {"pwm": self.pwm.tolist(), # convert numpyarray to list
"name": self.name
}
@classmethod
def from_config(cls, pwm_dict):
return cls(**pwm_dict)
def plotPWM(self, figsize=(10, 2)):
pwm = self.pwm
fig = seqlogo_fig(pwm, vocab="DNA", figsize=figsize)
plt.ylabel("Probability")
return fig
def plotPWMInfo(self, figsize=(10, 2)):
pwm = self.pwm
info = _pwm2pwm_info(pwm)
fig = seqlogo_fig(info, vocab="DNA", figsize=figsize)
plt.ylabel("Bits")
return fig
def get_pssm(self, background_probs=DEFAULT_BASE_BACKGROUND):
b = background_probs2array(background_probs)
b = b.reshape([1, 4])
return np.log(self.pwm / b).astype(self.pwm.dtype)
def plotPSSM(self, background_probs=DEFAULT_BASE_BACKGROUND, figsize=(10, 2)):
pssm = self.get_pssm()
return seqlogo_fig(pssm, vocab="DNA", figsize=figsize)
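# A minimal usage sketch for the PWM class above. The function name, consensus
# string and keyword values here are illustrative only, not part of the
# original module:
def _pwm_usage_sketch():
    pwm = PWM.from_consensus("ACGTAT", background_proportion=0.1, name="example")
    consensus = pwm.get_consensus()        # -> 'ACGTAT'
    pssm = pwm.get_pssm()                  # log-odds against the uniform background
    background = PWM.from_background(length=6, name="background")
    return consensus, pssm, background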
def _pwm2pwm_info(pwm):
# normalize pwm to sum 1,
# otherwise pwm is not a valid distribution
# then info has negative values
col_sums = pwm.sum(1)
pwm = pwm / col_sums[:, np.newaxis]
H = - np.sum(pwm * np.log2(pwm), axis=1)
R = np.log2(4) - H
info = pwm * R[:, np.newaxis, ...]
return info
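# Worked example for the information-content calculation above (illustrative
# numbers, uniform 4-letter background): a uniform row [0.25, 0.25, 0.25, 0.25]
# has entropy H = -4 * 0.25 * log2(0.25) = 2 bits, so R = log2(4) - H = 0 and
# every letter height is 0. As a row approaches certainty, e.g.
# [0.97, 0.01, 0.01, 0.01], H shrinks toward 0 and R approaches 2 bits, so the
# dominant letter's height approaches 2 bits. Rows containing exact zeros would
# produce nan here, because 0 * np.log2(0) evaluates to nan in NumPy.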
def _check_background_probs(background_probs):
assert isinstance(background_probs, list)
assert sum(background_probs) == 1.0
assert len(background_probs) == 4
for b in background_probs:
assert b >= 0
assert b <= 1
def pwm_list2pwm_array(pwm_list, shape=(None, 4, None), dtype=None, background_probs=DEFAULT_BASE_BACKGROUND):
# print("shape: ", shape)
if shape[1] != 4:
raise ValueError("shape[1] has to be 4 and not {0}".format(shape[1]))
# copy pwm_list
pwm_list = copy.deepcopy(pwm_list)
n_motifs = len(pwm_list)
# set the default values
shape = list(shape)
if shape[0] is None:
if len(pwm_list) == 0:
raise ValueError("Max pwm length can't be inferred for empty pwm list")
# max pwm length
shape[0] = max([pwm.pwm.shape[0] for pwm in pwm_list])
if shape[2] is None:
if len(pwm_list) == 0:
raise ValueError("n_motifs can't be inferred for empty pwm list")
shape[2] = n_motifs
# (kernel_size, 4, filters)
required_motif_len = shape[0]
required_n_motifs = shape[2]
# fix n_motifs
if required_n_motifs > n_motifs:
add_n_pwm = required_n_motifs - n_motifs
pwm_list += [PWM.from_background(length=required_motif_len, probs=background_probs)] * add_n_pwm
if required_n_motifs < n_motifs:
print("Removing {0} pwm's from pwm_list".format(n_motifs - required_n_motifs))
pwm_list = pwm_list[:required_n_motifs]
# fix motif_len
pwm_list = [pwm._change_length(required_motif_len, probs=background_probs) for pwm in pwm_list]
# stack the matrices along the last axis
pwm_array = np.stack([pwm.pwm for pwm in pwm_list], axis=-1)
pwm_array = pwm_array.astype(dtype)
# change the axis order
return pwm_array
def background_probs2array(background_probs, indexToLetter=DEFAULT_INDEX_TO_LETTER):
barr = [background_probs[indexToLetter[i]] for i in range(4)]
_check_background_probs(barr)
return np.asarray(barr)
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
"""Convert pwm array to pssm array
"""
b = background_probs2array(background_probs)
b = b.reshape([1, 4, 1])
return np.log(arr / b).astype(arr.dtype)
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
"""Convert pssm array to pwm array
"""
b = background_probs2array(background_probs)
b = b.reshape([1, 4, 1])
return (np.exp(arr) * b).astype(arr.dtype)
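# The two converters above are inverses for strictly positive probabilities,
# since exp(log(p / b)) * b == p. A small check illustrating this; the helper
# name is hypothetical and not part of the original module:
def _pssm_roundtrip_check(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    roundtrip = pssm_array2pwm_array(
        pwm_array2pssm_array(arr, background_probs), background_probs)
    return np.allclose(arr, roundtrip)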
def load_motif_db(filename, skipn_matrix=0):
"""Read the motif file in the following format
```
>motif_name
<skip n>0.1<delim>0.2<delim>0.5<delim>0.6
...
>motif_name2
....
```
Delim can be anything supported by np.loadtxt
# Arguments
filename: str, file path
skipn_matrix: integer, number of characters to skip when reading
the numeric matrix (for Encode = 2)
# Returns
Dictionary of numpy arrays
"""
# read-lines
if filename.endswith(".gz"):
f = gzip.open(filename, 'rt', encoding='utf-8')
else:
f = open(filename, 'r')
lines = f.readlines()
f.close()
motifs_dict = {}
motif_lines = ""
motif_name = None
def lines2matrix(lines):
return np.loadtxt(StringIO(lines))
for line in lines:
if line.startswith(">"):
if motif_lines:
# lines -> matrix
motifs_dict[motif_name] = lines2matrix(motif_lines)
motif_name = line[1:].strip()
motif_lines = ""
else:
motif_lines += line[skipn_matrix:]
if motif_lines and motif_name is not None:
motifs_dict[motif_name] = lines2matrix(motif_lines)
return motifs_dict
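# A minimal input sketch for load_motif_db (the file name and motif names are
# hypothetical; any delimiter accepted by np.loadtxt works):
#
#   >motif_A
#   0.1 0.2 0.3 0.4
#   0.7 0.1 0.1 0.1
#   >motif_B
#   0.3 0.3 0.2 0.2
#   0.25 0.25 0.25 0.25
#
# load_motif_db("motifs.txt") would then return a dict mapping 'motif_A' and
# 'motif_B' to (2, 4) numpy arrays; for ENCODE-style files whose matrix lines
# carry a two-character prefix, pass skipn_matrix=2 as noted in the docstring.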
| mit |
ua-snap/downscale | snap_scripts/data_requests/extract_profiles_keith_dot_precip_cmip5.py | 1 | 5812 |
# extraction for Keith Cunningham -- Glitter Gulch DOT
def make_gdf():
df = pd.DataFrame({'name':['Long Lake','Glitter Gulch'],
'lat':[61.8,63.76],'lon':[-148.2,-148.9]})
df['geometry'] = df.apply(lambda x: Point(x.lon, x.lat), axis=1)
shp = gpd.GeoDataFrame( df, crs={'init':'epsg:4326'}, geometry='geometry')
return shp.to_crs( epsg=3338 )
def list_data( base_dir ):
files = glob.glob( os.path.join( base_dir, '*.tif' ) )
df = pd.DataFrame([ os.path.basename(fn).split('.')[0].split('_')[-2:] for fn in files ], columns=['month','year'])
df['fn'] = files
return df.sort_values(['year','month']).reset_index(drop=True)
def rasterize( shapes, coords, latitude='lat', longitude='lon', fill=None, **kwargs ):
'''
Rasterize a list of (geometry, fill_value) tuples onto the given
xarray coordinates. This only works for 1d latitude and longitude
arrays.
ARGUMENTS:
----------
shapes = [list] of tuples of (shapely.geom, fill_value)
coords = [dict] of named 1d latitude and longitude arrays.
latitude = [str] name of latitude key. default:'latitude'
longitude = [str] name of longitude key. default:'longitude'
fill = fill_value
RETURNS:
--------
xarray.DataArray
'''
from rasterio import features
import xarray as xr
if fill is None:
fill = np.nan
transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] )
out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) )
raster = features.rasterize( shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs )
# spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
# return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))
return raster
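# Note: `transform_from_latlon` is called above but never defined or imported in
# this script. A common definition is sketched below, assuming the `affine`
# package and evenly spaced 1-D coordinate arrays (this sketch is not part of
# the original script):
def transform_from_latlon(lat, lon):
    from affine import Affine
    lat, lon = np.asarray(lat), np.asarray(lon)
    # translate to the first grid point, then scale by the constant grid spacing
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale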
def make_mask( fn ):
with rasterio.open(fn) as rst:
meta = rst.meta.copy()
arr = np.empty_like(rst.read(1))
# build the shape data we need in EPSG:3338
shp = make_gdf()
pts = shp.geometry.tolist()
pts = [ (i,count+1) for count,i in enumerate(pts) ]
return rasterize( pts, fill=0, out=arr, transform=meta['transform'], all_touched=True, dtype='float32' )
def open_raster( fn, band=1 ):
with rasterio.open(fn) as rst:
arr = rst.read(band)
return arr
def extract_values( files, mask ):
pool = mp.Pool(64)
f = partial(open_raster, band=1)
arr = np.array(pool.map( f, files ))
pool.close()
pool.join()
pool = None; del pool
mask_vals = np.unique(mask[mask > 0])
out = dict()
for mask_val in mask_vals:
ind = zip(*np.where(mask == mask_val))
for i,j in ind:
out.update({ mask_val:arr[:,i,j] })
del arr
return out
if __name__ == '__main__':
import os, glob, rasterio, itertools
import pandas as pd
import numpy as np
import rasterio
from rasterio.features import rasterize
from shapely.geometry import Point
import geopandas as gpd
from functools import partial
import multiprocessing as mp
base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/downscaled'
output_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/project_data_delivery/Keith_DOT_extractions'
template_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/downscaled/5ModelAvg/historical/tasmin/tasmin_mean_C_ar5_5ModelAvg_historical_08_2004.tif'
models = ['5ModelAvg', 'GFDL-CM3', 'GISS-E2-R', 'IPSL-CM5A-LR', 'MRI-CGCM3', 'NCAR-CCSM4']
scenarios = ['historical','rcp45','rcp60','rcp85']
variables = ['pr']
# make mask
mask = make_mask( template_fn )
output_dict = dict()
for model, scenario, variable in itertools.product(models, scenarios, variables):
cur_path = os.path.join(base_dir,model,scenario,variable)
files_df = list_data( cur_path ) # these are sorted
decade_grouper = files_df.apply(lambda x:str(x.year)[:3], axis=1)
file_groups = [ j.fn.tolist() for i,j in files_df.groupby( decade_grouper ) ]
out = [ extract_values( files, mask ) for files in file_groups ]
out_df = pd.concat([ pd.DataFrame(i) for i in out ]) # stack the file groups chronologically
out_key = '{}_{}_{}'.format( model, scenario, variable )
output_dict[ out_key ] = out_df
print( 'completed:{}'.format(out_key) )
# future index
future_dates = pd.date_range('2006-01','2101-01',freq='M')
future_dates = [ [str(i.year), str(i.month)] for i in future_dates ]
future_dates = [ '-'.join([y,'0'+m]) if len(m) == 1 else '-'.join([y,m]) for y,m in future_dates ]
# historical index -- data needs slicing...
historical_dates = pd.date_range('1900-01','2006-01',freq='M')
historical_dates = [[str(i.month), str(i.year)] for i in historical_dates ]
historical_dates = [ '-'.join([y,'0'+m]) if len(m) == 1 else '-'.join([y,m]) for y,m in historical_dates ]
# make data frames
df1_future, df2_future = [pd.DataFrame({key:np.array(output_dict[key][i]) for key in output_dict if 'historical' not in key }, index=future_dates) for i in [1,2]]
df1_historical, df2_historical = [pd.DataFrame({key:np.array(output_dict[key][i])[-len(historical_dates):] for key in output_dict if 'historical' in key }, index=historical_dates) for i in [1,2]]
# dump them to disk
naming_lookup = {1:'LongLake', 2:'GlitterGulch'}
df1_future_fn = 'precipitation_cmip5_allmodels_allscenarios_futures_2006-2100_LongLake_AK.csv'
df2_future_fn = 'precipitation_cmip5_allmodels_allscenarios_futures_2006-2100_GlitterGulch_AK.csv'
df1_historical_fn = 'precipitation_cmip5_allmodels_allscenarios_historical_1900-2005_LongLake_AK.csv'
df2_historical_fn = 'precipitation_cmip5_allmodels_allscenarios_historical_1900-2005_GlitterGulch_AK.csv'
df1_future.to_csv( os.path.join( output_dir, df1_future_fn), sep=',' )
df2_future.to_csv( os.path.join( output_dir, df2_future_fn), sep=',' )
df1_historical.to_csv( os.path.join( output_dir, df1_historical_fn), sep=',' )
df2_historical.to_csv( os.path.join( output_dir, df2_historical_fn), sep=',' )
| mit |
gwtsa/gwtsa | pastas/recharge/recharge_func.py | 1 | 2673 |
"""recharge_func module
Author: R.A. Collenteur, University of Graz
Contains the classes for the different models that are available to calculate
the recharge from precipitation and evaporation data.
Each Recharge class contains at least the following:
Attributes
----------
nparam: int
Number of parameters needed for this model.
Functions
---------
get_init_parameters(self, name)
A function that returns a Pandas DataFrame of the parameters of the
recharge function. Columns of the dataframe need to be ['initial', 'pmin',
'pmax', 'vary', 'name']. Rows of the DataFrame have names of the parameters. Input
name is used as a prefix. This function is called by a stressmodel object.
simulate(self, evap, prec, p=None)
A function that returns an array of the simulated recharge series.
"""
import numpy as np
import pandas as pd
class RechargeBase:
"""Base class for classes that calculate the recharge.
"""
def __init__(self):
self.temp = False
self.nparam = 0
@staticmethod
def get_init_parameters(name="recharge"):
"""
Parameters
----------
name: str, optional
String with the name that is used as prefix for the parameters.
Returns
-------
parameters: pandas.DataFrame
Pandas DataFrame with the parameters.
"""
parameters = pd.DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
return parameters
def simulate(self, prec, evap, p, temp=None):
pass
class Linear(RechargeBase):
"""Linear recharge model.
The recharge to the groundwater is calculated as:
R = P - f * E
"""
_name = "Linear"
def __init__(self):
RechargeBase.__init__(self)
self.nparam = 1
def get_init_parameters(self, name="recharge"):
parameters = pd.DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
parameters.loc[name + '_f'] = (-1.0, -2.0, 0.0, True, name)
return parameters
def simulate(self, prec, evap, p, **kwargs):
"""
Parameters
----------
prec, evap: array_like
array with the precipitation and evaporation values. These
arrays must be of the same length and at the same time steps.
p: float
parameter value used in recharge calculation.
Returns
-------
recharge: array_like
array with the recharge series.
"""
return np.add(prec, np.multiply(evap, p))
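# A minimal usage sketch for the Linear model above; the function name and the
# array values are illustrative, not part of the original module. As implemented,
# simulate returns prec + p * evap, so with the default initial parameter
# p = -1.0 the recharge is precipitation minus evaporation:
def _linear_usage_sketch():
    lin = Linear()
    parameters = lin.get_init_parameters("recharge")   # one row, named 'recharge_f'
    prec = np.array([10.0, 0.0, 5.0])
    evap = np.array([2.0, 1.0, 3.0])
    recharge = lin.simulate(prec, evap, p=-1.0)         # array([ 8., -1.,  2.])
    return parameters, recharge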
| mit |
KDD-OpenSource/geox-young-academy | day-2/exercises/pytorch-exercise-solution.py | 1 | 3635 |
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
### Task 1
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
### Task 2
dataiter = iter(trainloader)
images, labels = dataiter.next()
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# show images
imshow(torchvision.utils.make_grid(images))
### Task 3
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
print(net)
### Task 4
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(10): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.data[0]
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
### Task 5
dataiter = iter(testloader)
images, labels = dataiter.next()
outputs = net(Variable(images))
_, predicted = torch.max(outputs.data, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
imshow(torchvision.utils.make_grid(images))
### Task 6
correct = 0
total = 0
for data in testloader:
images, labels = data
outputs = net(Variable(images))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
| mit |
mortada/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
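# Illustrative sketch (not part of the original module): for a plain, non-dict
# 3-class problem the shape helper above behaves as follows (the shapes and
# batch size are assumptions chosen only for this example):
#   _get_in_out_shape((100, 5), (100,), n_classes=3, batch_size=32)
#   -> input_shape  = [32, 5]
#      output_shape = [32, 3]   # one-hot over the 3 classes
#      batch_size   = 32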
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is a `dict` (or an iterable which returns a dict), `n_classes` must be a
`dict` such that `n_classes[key] = n_classes for y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
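# Illustrative sketch (not part of the original module): batching a plain
# iterator of row vectors with the generator above.
#   list(_batch_data(iter([[1, 2], [3, 4], [5, 6]]), batch_size=2))
#   -> [np.matrix([[1, 2], [3, 4]]), np.matrix([[5, 6]])]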
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
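# Illustrative sketch (not part of the original module): a 1-D numpy array is
# reshaped to a single column and then split into batches.
#   setup_predict_data_feeder(np.arange(10), batch_size=4)
#   -> [array of shape (4, 1), array of shape (4, 1), array of shape (2, 1)]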
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes from disk or
somewhere else. It is common to have these iterators rotate infinitely over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
jpautom/scikit-learn | sklearn/setup.py | 19 | 2994 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
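# Illustrative sketch (not part of the original test module): the behaviour
# exercised above in a single call -- mean imputation with 0 treated as the
# missing value on a small dense array (the values are assumptions for this
# example only).
#   Imputer(missing_values=0, strategy="mean", axis=0).fit_transform(
#       np.array([[1., 0.], [3., 4.]]))
#   -> array([[1., 4.], [3., 4.]])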
| bsd-3-clause |
sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/parse/transitionparser.py | 7 | 31342 | # Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2017 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
from numpy import array
from scipy import sparse
from sklearn.datasets import load_svmlight_file
from sklearn import svm
except ImportError:
pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
"""
Class for holding configuration which is the partial analysis of the input sentence.
The transition based parser aims at finding a set of operators that transfer the initial
configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially processed words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
This class also provides a method to represent a configuration as list of features.
"""
def __init__(self, dep_graph):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs)
def _check_informative(self, feat, flag=False):
"""
Check whether a feature is informative
The flag controls whether "_" is informative or not
"""
if feat is None:
return False
if feat == '':
return False
if flag is False:
if feat == '_':
return False
return True
def extract_features(self):
"""
Extract the set of features for the current configuration. Implement standard features as describe in
Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonal, Joakim Nivre.
Please note that these features are very basic.
:return: list(str)
"""
result = []
# Todo : can come up with more complicated features set for better
# performance.
if len(self.stack) > 0:
# Stack 0
stack_idx0 = self.stack[len(self.stack) - 1]
token = self._tokens[stack_idx0]
if self._check_informative(token['word'], True):
result.append('STK_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('STK_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('STK_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('STK_0_FEATS_' + feat)
# Stack 1
if len(self.stack) > 1:
stack_idx1 = self.stack[len(self.stack) - 2]
token = self._tokens[stack_idx1]
if self._check_informative(token['tag']):
result.append('STK_1_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == stack_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('STK_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('STK_0_RDEP_' + dep_right_most)
# Check Buffered 0
if len(self.buffer) > 0:
# Buffer 0
buffer_idx0 = self.buffer[0]
token = self._tokens[buffer_idx0]
if self._check_informative(token['word'], True):
result.append('BUF_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('BUF_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('BUF_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('BUF_0_FEATS_' + feat)
# Buffer 1
if len(self.buffer) > 1:
buffer_idx1 = self.buffer[1]
token = self._tokens[buffer_idx1]
if self._check_informative(token['word'], True):
result.append('BUF_1_FORM_' + token['word'])
if self._check_informative(token['tag']):
result.append('BUF_1_POS_' + token['tag'])
if len(self.buffer) > 2:
buffer_idx2 = self.buffer[2]
token = self._tokens[buffer_idx2]
if self._check_informative(token['tag']):
result.append('BUF_2_POS_' + token['tag'])
if len(self.buffer) > 3:
buffer_idx3 = self.buffer[3]
token = self._tokens[buffer_idx3]
if self._check_informative(token['tag']):
result.append('BUF_3_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == buffer_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('BUF_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('BUF_0_RDEP_' + dep_right_most)
return result
class Transition(object):
"""
This class defines a set of transition which is applied to a configuration to get another configuration
Note that for different parsing algorithm, the transition is different.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self, alg_option):
"""
:param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type alg_option: str
"""
self._algo = alg_option
if alg_option not in [
TransitionParser.ARC_STANDARD,
TransitionParser.ARC_EAGER]:
raise ValueError(" Currently we only support %s and %s " %
(TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
def left_arc(self, conf, relation):
"""
Note that the algorithm for left-arc is quite similar for both arc-standard and arc-eager, except for the precondition
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if conf.buffer[0] == 0:
# here is the Root element
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = True
if self._algo == TransitionParser.ARC_EAGER:
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = False
if flag:
conf.stack.pop()
idx_wj = conf.buffer[0]
conf.arcs.append((idx_wj, relation, idx_wi))
else:
return -1
def right_arc(self, conf, relation):
"""
Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if self._algo == TransitionParser.ARC_STANDARD:
idx_wi = conf.stack.pop()
idx_wj = conf.buffer[0]
conf.buffer[0] = idx_wi
conf.arcs.append((idx_wi, relation, idx_wj))
else: # arc-eager
idx_wi = conf.stack[len(conf.stack) - 1]
idx_wj = conf.buffer.pop(0)
conf.stack.append(idx_wj)
conf.arcs.append((idx_wi, relation, idx_wj))
def reduce(self, conf):
"""
Note that the algorithm for reduce is only available for arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if self._algo != TransitionParser.ARC_EAGER:
return -1
if len(conf.stack) <= 0:
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = False
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = True
if flag:
conf.stack.pop() # reduce it
else:
return -1
def shift(self, conf):
"""
Note that the algorithm for shift is the SAME for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if len(conf.buffer) <= 0:
return -1
idx_wi = conf.buffer.pop(0)
conf.stack.append(idx_wi)
class TransitionParser(ParserI):
"""
Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager"
"""
ARC_STANDARD = 'arc-standard'
ARC_EAGER = 'arc-eager'
def __init__(self, algorithm):
"""
:param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
:type algorithm: str
"""
if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
raise ValueError(" Currently we only support %s and %s " %
(self.ARC_STANDARD, self.ARC_EAGER))
self._algorithm = algorithm
self._dictionary = {}
self._transition = {}
self._match_transition = {}
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
:param features: list of feature string which is needed to convert to binary features
:type features: list(str)
:return : string of binary features in libsvm format which is 'featureID:value' pairs
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
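# Illustrative sketch (not part of the original module): repeated calls grow
# the shared feature dictionary, and each feature string maps to a stable
# 'featureID:1.0' entry (the IDs below assume a freshly created parser):
#   self._convert_to_binary_features(['STK_0_POS_NN', 'BUF_0_POS_VBD'])
#   -> '0:1.0 1:1.0'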
def _is_projective(self, depgraph):
arc_list = []
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
if parentIdx is not None:
arc_list.append((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
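# Illustrative sketch (not part of the original module): projectivity check on
# a tiny hand-written graph in the same 4-column (word, tag, head, rel) format
# used by demo() below; the sentence is an assumption for this example only.
#   dg = DependencyGraph("Economic JJ 2 ATT\nnews NN 3 SBJ\nhas VBD 0 ROOT")
#   TransitionParser('arc-standard')._is_projective(dg)  # -> True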
def _write_to_file(self, key, binary_features, input_file):
"""
write the binary features to input file and update the transition dictionary
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
def _create_training_examples_arc_std(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonal and Joakim Nivre (2009)
"""
operation = Transition(self.ARC_STANDARD)
count_proj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
count_proj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
precondition = True
# Get the max-index of buffer
maxID = conf._max_address
for w in range(maxID + 1):
if w != b0:
relw = self._get_dep_relation(b0, w, depgraph)
if relw is not None:
if (b0, relw, w) not in conf.arcs:
precondition = False
if precondition:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(
key,
binary_features,
input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(count_proj))
return training_seq
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Joav Goldberg and Joakim Nivre
"""
operation = Transition(self.ARC_EAGER)
countProj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
countProj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = Transition.REDUCE
self._write_to_file(key, binary_features, input_file)
operation.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(countProj))
return training_seq
def train(self, depgraphs, modelfile, verbose=True):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
:param modelfile : file name to save the trained model
:type modelfile : str
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
if self._algorithm == self.ARC_STANDARD:
self._create_training_examples_arc_std(depgraphs, input_file)
else:
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
# The parameter is set according to the paper:
# Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# Todo : because of probability = True => very slow due to
# cross-validation. Need to improve the speed here
model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=verbose,
probability=True)
model.fit(x_train, y_train)
# Save the model to file name (as pickle)
pickle.dump(model, open(modelfile, 'wb'))
finally:
remove(input_file.name)
def parse(self, depgraphs, modelFile):
"""
:param depgraphs: the list of test sentences; each sentence is represented as a dependency graph where the 'head' information is dummy
:type depgraphs: list(DependencyGraph)
:param modelfile: the model file
:type modelfile: str
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
# First load the model
model = pickle.load(open(modelFile, 'rb'))
operation = Transition(self._algorithm)
for depgraph in depgraphs:
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = array(sorted(col)) # NB : index must be sorted
np_row = array(row)
np_data = array(data)
x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
# It's best to use decision function as follow BUT it's not supported yet for sparse SVM
# Using decision funcion to build the votes array
#dec_func = model.decision_function(x_test)[0]
#votes = {}
#k = 0
# for i in range(len(model.classes_)):
# for j in range(i+1, len(model.classes_)):
# #if dec_func[k] > 0:
# votes.setdefault(i,0)
# votes[i] +=1
# else:
# votes.setdefault(j,0)
# votes[j] +=1
# k +=1
# Sort votes according to the values
#sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
# We will use predict_proba instead of decision_function
prob_dict = {}
pred_prob = model.predict_proba(x_test)[0]
for i in range(len(pred_prob)):
prob_dict[i] = pred_prob[i]
sorted_Prob = sorted(
prob_dict.items(),
key=itemgetter(1),
reverse=True)
# Note that SHIFT is always a valid operation
for (y_pred_idx, confidence) in sorted_Prob:
#y_pred = model.predict(x_test)[0]
# From the prediction match to the operation
y_pred = model.classes_[y_pred_idx]
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
baseTransition = strTransition.split(":")[0]
if baseTransition == Transition.LEFT_ARC:
if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.RIGHT_ARC:
if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.REDUCE:
if operation.reduce(conf) != -1:
break
elif baseTransition == Transition.SHIFT:
if operation.shift(conf) != -1:
break
else:
raise ValueError("The predicted transition is not recognized, expected errors")
# Finish with operations build the dependency graph from Conf.arcs
new_depgraph = deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
                # By default, every token depends on the root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
def demo():
"""
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
>>> gold_sent = DependencyGraph(\"""
... Economic JJ 2 ATT
... news NN 3 SBJ
... has VBD 0 ROOT
... little JJ 5 ATT
... effect NN 3 OBJ
... on IN 5 ATT
... financial JJ 8 ATT
... markets NNS 6 PC
... . . 3 PU
... \""")
>>> conf = Configuration(gold_sent)
###################### Check the Initial Feature ########################
>>> print(', '.join(conf.extract_features()))
STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
###################### Check The Transition #######################
Check the Initialized Configuration
>>> print(conf)
Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
A. Do some transition checks for ARC-STANDARD
>>> operation = Transition('arc-standard')
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.left_arc(conf,"SBJ")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
Middle Configuration and Features Check
>>> print(conf)
Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
>>> print(', '.join(conf.extract_features()))
STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
>>> operation.right_arc(conf, "PC")
>>> operation.right_arc(conf, "ATT")
>>> operation.right_arc(conf, "OBJ")
>>> operation.shift(conf)
>>> operation.right_arc(conf, "PU")
>>> operation.right_arc(conf, "ROOT")
>>> operation.shift(conf)
Terminated Configuration Check
>>> print(conf)
Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
B. Do some transition checks for ARC-EAGER
>>> conf = Configuration(gold_sent)
>>> operation = Transition('arc-eager')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'SBJ')
>>> operation.right_arc(conf,'ROOT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'OBJ')
>>> operation.right_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'PC')
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.right_arc(conf,'PU')
>>> print(conf)
Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
###################### Check The Training Function #######################
A. Check the ARC-STANDARD training
>>> import tempfile
>>> import os
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
>>> parser_std = TransitionParser('arc-standard')
>>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
>>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False)
Number of training examples : 1
Number of valid (projective) examples : 1
>>> remove(input_file.name)
B. Check the ARC-EAGER training
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
>>> parser_eager = TransitionParser('arc-eager')
>>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
>>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False)
Number of training examples : 1
Number of valid (projective) examples : 1
>>> remove(input_file.name)
###################### Check The Parsing Function ########################
A. Check the ARC-STANDARD parser
>>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
B. Check the ARC-EAGER parser
>>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
Remove test temporary files
>>> remove('temp.arceager.model')
>>> remove('temp.arcstd.model')
    Note that the result is very poor because there is only one training example.
"""
| mit |
vslavik/poedit | deps/boost/libs/numeric/odeint/performance/plot_result.py | 43 | 2225 | """
Copyright 2011-2014 Mario Mulansky
Copyright 2011-2014 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
import numpy as np
from matplotlib import pyplot as plt
plt.rc("font", size=16)
def get_runtime_from_file(filename):
gcc_perf_file = open(filename, 'r')
for line in gcc_perf_file:
if "Minimal Runtime:" in line:
return float(line.split(":")[-1])
t_gcc = [get_runtime_from_file("perf_workbook/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_gcc.perf")]
t_intel = [get_runtime_from_file("perf_workbook/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_intel.perf")]
t_gfort = [get_runtime_from_file("perf_workbook/rk4_gfort.perf"),
get_runtime_from_file("perf_ariel/rk4_gfort.perf"),
get_runtime_from_file("perf_lyra/rk4_gfort.perf")]
t_c_intel = [get_runtime_from_file("perf_workbook/rk4_c_intel.perf"),
get_runtime_from_file("perf_ariel/rk4_c_intel.perf"),
get_runtime_from_file("perf_lyra/rk4_c_intel.perf")]
print(t_c_intel)
ind = np.arange(3) # the x locations for the groups
width = 0.15 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, t_gcc, width, color='b', label="odeint gcc")
rects2 = ax.bar(ind+width, t_intel, width, color='g', label="odeint intel")
rects3 = ax.bar(ind+2*width, t_c_intel, width, color='y', label="C intel")
rects4 = ax.bar(ind+3*width, t_gfort, width, color='c', label="gfort")
ax.axis([-width, 2.0+5*width, 0.0, 0.85])
ax.set_ylabel('Runtime (s)')
ax.set_title('Performance for integrating the Lorenz system')
ax.set_xticks(ind + 1.5*width)
ax.set_xticklabels(('Core i5-3210M\n3.1 GHz',
'Xeon E5-2690\n3.8 GHz',
'Opteron 8431\n 2.4 GHz'))
ax.legend(loc='upper left', prop={'size': 16})
plt.savefig("perf.pdf")
plt.savefig("perf.png", dpi=50)
plt.show()
| mit |
Alex-Girard/D3MapOverlays | data/extract_food_inspections_data.py | 1 | 1633 | #!/usr/bin/env python
import pandas as pd
import numpy as np
def getViolationsScore():
df = pd.read_csv('data/tmp/food/violations_plus.csv', encoding="windows-1252")
df = df[['business_id','date','risk_category']].dropna()
# drop invalid dates
df = df[df['date'] < 20141231]
latest = df['date'].max()
oldest = df['date'].min()
def computeViolationScore(row):
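        # Score a single violation: weight by risk level (1 = low, 2 = moderate,
        # 3 = high) and scale by how recent the violation is relative to the
        # oldest/latest dates in the data, normalized to a 0-100 range.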
if row['risk_category'] == 'Low Risk':
risk = 1
elif row['risk_category'] == 'Moderate Risk':
risk = 2
elif row['risk_category'] == 'High Risk':
risk = 3
else:
return 0
return risk * (row['date'] - oldest) * 100 / (3 * (latest - oldest))
df['violationScore'] = df.apply(computeViolationScore, axis=1)
return df.groupby(['business_id'])['violationScore'].agg(np.mean)
def getLatestInspections():
df = pd.read_csv('data/tmp/food/inspections.csv', encoding="windows-1252")
df = df[['business_id','Score','date']].dropna()
return df[df.groupby(['business_id'])['date'].transform(max) == df['date']]
def getBusinessData():
df = pd.read_csv('data/tmp/food/businesses.csv', encoding="windows-1252")
df = df[['business_id','name','latitude','longitude']].dropna()
df = df[df.latitude > 36]
df = df[df.longitude < -122.3]
data = df.merge(getLatestInspections(), how='inner', on='business_id')
data = data.join(getViolationsScore(), how='left', on='business_id')
data['violationScore'].fillna(value=0, inplace=True)
data['total'] = data['Score'] - data['violationScore']
# data = data[data['total'] > 99]
return data
df = getBusinessData()
df.to_csv('data/tmp/food_inspection.csv', columns=['latitude', 'longitude', 'total'], index=False)
| mit |
softwaresaved/fat | lowfat/management/commands/fixgrant.py | 2 | 1405 | import pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund
class Command(BaseCommand):
help = "Fix grant"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='activities.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["fornames"],
claimant__surname=line["surname"],
title=line["title"]
)
for fund in funds:
fund.grant = line["grant"]
if line["grant_heading"] == "Fellowship":
fund.grant_heading = "F"
elif line["grant_heading"] == "Core":
fund.grant_heading = "I"
elif line["grant_heading"] == "Continuing":
fund.grant_heading = "C"
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
| bsd-3-clause |
giacomov/3ML | threeML/utils/photometry/filter_set.py | 1 | 5715 | from __future__ import division
from builtins import zip
from builtins import object
from past.utils import old_div
import speclite.filters as spec_filters
import astropy.units as astro_units
import numpy as np
import astropy.constants as constants
from threeML.utils.interval import IntervalSet
class NotASpeclikeFilter(RuntimeError):
pass
class FilterSet(object):
def __init__(self, filter, mask=None):
"""
        This class handles the optical filter functionality. It is built around speclite:
        http://speclite.readthedocs.io/en/latest/
        It accepts a speclite FilterResponse or FilterSequence, allowing for full customization
        of the filters.
        :param filter: a speclite FilterResponse or FilterSequence
:param mask: an initial mask on the filters (bool array) that remains fixed
"""
# we explicitly violate duck typing here in order to have one routine
# to return values from the filters (speclite appends 's' to the end of sequence calls)
if isinstance(filter, spec_filters.FilterResponse):
# we will make a sequence
self._filters = spec_filters.FilterSequence([filter])
elif isinstance(filter, spec_filters.FilterSequence):
self._filters = filter # type: spec_filters.FilterSequence
else:
raise NotASpeclikeFilter(
"filter must be a speclite FilterResponse or FilterSequence"
)
if mask is not None:
tmp = []
for condition, response in zip(mask, self._filters):
if condition:
tmp.append(response)
self._filters = spec_filters.FilterSequence(tmp)
self._names = np.array([name.split("-")[1] for name in self._filters.names])
self._long_name = self._filters.names
# haven't set a likelihood model yet
self._model_set = False
# calculate the FWHM
self._calculate_fwhm()
@property
def wavelength_bounds(self):
"""
IntervalSet of FWHM bounds of the filters
:return:
"""
return self._wavebounds
def _calculate_fwhm(self):
"""
calculate the FWHM of the filters
:return:
"""
wmin = []
wmax = []
# go through each filter
# and find the non-gaussian FWHM bounds
for filter in self._filters:
response = filter.response
max_response = response.max()
idx_max = response.argmax()
half_max = 0.5 * max_response
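            # Find the wavelength samples closest to half maximum on each side of the
            # peak; these bracket the (non-Gaussian) FWHM of the filter response.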
idx1 = abs(response[:idx_max] - half_max).argmin()
idx2 = abs(response[idx_max:] - half_max).argmin() + idx_max
# have to grab the private member here
# bc the library does not expose it!
w1 = filter._wavelength[idx1]
w2 = filter._wavelength[idx2]
wmin.append(w1)
wmax.append(w2)
self._wavebounds = IntervalSet.from_starts_and_stops(wmin, wmax)
def set_model(self, differential_flux):
"""
        Set the model that will be used during the convolution. Note that speclite
        considers a differential flux to be in units of erg/s/cm2/lambda, so we must convert
        astromodels into the proper units (using astropy units!)
"""
conversion_factor = (constants.c ** 2 * constants.h ** 2).to("keV2 * cm2")
def wrapped_model(x):
return old_div(differential_flux(x) * conversion_factor, x ** 3)
self._wrapped_model = wrapped_model
self._model_set = True
def ab_magnitudes(self):
"""
return the effective stimulus of the model and filter for the given
magnitude system
:return: np.ndarray of ab magnitudes
"""
assert self._model_set, "no likelihood model has been set"
# speclite has issues with unit conversion
# so we will do the calculation manually here
ratio = []
for filter in self._filters:
# first get the flux and convert it to base units
synthetic_flux = filter.convolve_with_function(self._wrapped_model).to(
"1/(cm2 s)"
)
# normalize it to the filter's AB magnitude
ratio.append(
(old_div(synthetic_flux, filter.ab_zeropoint.to("1/(cm2 s)"))).value
)
ratio = np.array(ratio)
return -2.5 * np.log10(ratio)
# return self._filters.get_ab_magnitudes(self._wrapped_model).to_pandas().loc[0]
def plot_filters(self):
"""
plot the filter/ transmission curves
:return: fig
"""
spec_filters.plot_filters(self._filters)
@property
def n_bands(self):
"""
:return: the number of bands
"""
return len(self._filters.names)
@property
def filter_names(self):
"""
:return: the filter names
"""
return self._names
@property
def native_filter_names(self):
"""
the native filter names
:return:
"""
return self._filters.names
@property
def speclite_filters(self):
"""
        exposes the speclite filters for simulations
:return:
"""
return self._filters
@property
def effective_wavelength(self):
"""
        :return: the effective wavelengths of the filters
"""
return self._filters.effective_wavelengths
@property
def waveunits(self):
"""
:return: the pysynphot wave units
"""
return astro_units.Angstrom
| bsd-3-clause |
mhdella/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
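        # Each label describes an income range; keep only the upper bound of each
        # range (Clean maps 'Under' to 0 and 'over' to np.inf for the open-ended bins).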
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
# correct the first range
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
Ktakuya332C/FERL | sim.py | 1 | 1350 | from __future__ import division
import numpy as np
from matplotlib import pylab as plt
from tqdm import tqdm, trange
from tools import *
# Parameters
n_hidden = 13
dim_state = 12
dim_action = 40
scale = 0.7
n_key_states = 13
n_train = 30000
n_sample = 100
start_learning_rate = 0.01
last_learning_rate = 0.01
learning_rate = start_learning_rate
start_beta = 1
last_beta = 10
beta = start_beta
ave_per = 1000
# Define MDP
mdp = LargeActionTask(n_key_states, dim_state, dim_action)
# RBM training
rbm = RBM(n_hidden, dim_state, dim_action, scale)
rewards = []
mean_rewards = []
print( "Training start" )
t = trange(1, n_train + 1)
for i in t:
# Learning rate adaptation
learning_rate = start_learning_rate * ( last_learning_rate / start_learning_rate ) ** ( i / n_train )
beta = start_beta * ( last_beta / start_beta ) ** ( i / n_train )
# Training
state = mdp.next_key_state()
action = rbm.play(state, n_sample, beta)
reward = mdp.reward(action)
rbm.qlearn(state, action, reward, learning_rate)
# Save reward
t.set_description("Reward %d"%reward)
rewards.append(reward)
if i % ave_per == 0 and i > 0:
mean_rewards.append( np.mean(rewards[i-ave_per:i]) )
# Plotting
plt.plot(np.arange(1, 31), mean_rewards)
plt.xlabel("1000s iterations")
plt.ylabel("Average reward")
plt.savefig("result.png")
| mit |
basilfx/RIOT | tests/pkg_cmsis-nn/generate_image.py | 15 | 1140 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the CIFAR-10 dataset.
Pixel of the sample are stored as uint8, images have size 32x32x3.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (cifar10_test, _) = cifar10.load_data()
data = cifar10_test[args.index]
data = data.astype('uint8')
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data, output_path)
if args.no_plot is False:
plt.imshow(data)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in CIFAR test dataset")
parser.add_argument("-o", "--output", type=str, default='input',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
| lgpl-2.1 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/colours.py | 12 | 1320 | #!/usr/bin/env python
# -*- noplot -*-
"""
Some simple functions to generate colours.
"""
import numpy as np
from matplotlib.colors import colorConverter
def pastel(colour, weight=2.4):
""" Convert colour into a nice pastel shade"""
rgb = np.asarray(colorConverter.to_rgb(colour))
# scale colour
maxc = max(rgb)
if maxc < 1.0 and maxc > 0:
# scale colour
scale = 1.0 / maxc
rgb = rgb * scale
# now decrease saturation
total = rgb.sum()
slack = 0
for x in rgb:
slack += 1.0 - x
# want to increase weight from total to weight
# pick x s.t. slack * x == weight - total
# x = (weight - total) / slack
x = (weight - total) / slack
rgb = [c + (x * (1.0-c)) for c in rgb]
return rgb
def get_colours(n):
""" Return n pastel colours. """
base = np.asarray([[1,0,0], [0,1,0], [0,0,1]])
if n <= 3:
return base[0:n]
    # how many new colours do we need to insert between
# red and green and between green and blue?
needed = (((n - 3) + 1) / 2, (n - 3) / 2)
colours = []
for start in (0, 1):
for x in np.linspace(0, 1, needed[start]+2):
colours.append((base[start] * (1.0 - x)) +
(base[start+1] * x))
return [pastel(c) for c in colours[0:n]]
| gpl-2.0 |
kingkarlaachen/TraitsMatplotlibWidget | DraggableResizableRectangle.py | 2 | 18419 |
import numpy as np
from traits.api import HasTraits, Instance, Any, Str, on_trait_change, Int, Event
import matplotlib.patches as mpatches
import matplotlib
def axes_boundery_check(pos_1,pos_2,dx,dy,xlim,ylim):
'''
Checks whether the movement of the widget would result in a final position which is outside of the data range. If
widget being outside of data range, it sets dx/ dy so that the widget is at the closest value inside the data.
:param pos_1: Widget source point 1
:param pos_2: Widget source point 2
:param dx: shift of widget in x direction
:param dy: shift of widget in y direction
:param xlim: axes limits in x direction
:param ylim: axes limits in y direction
:return: Returns
'''
x0, y0 = pos_1
x1, y1 = pos_2
if np.min([x0 + dx, x1 + dx]) < np.min(xlim):
if x0 < x1:
dx = np.min(xlim) - x0
else:
dx = np.min(xlim) - x1
if np.max([x0 + dx, x1 + dx]) > np.max(xlim):
if x0 > x1:
dx = np.max(xlim) - x0
else:
dx = np.max(xlim) - x1
if np.min([y0 + dy, y1 + dy]) < np.min(ylim):
if y0 < y1:
dy = np.min(ylim) - y0
else:
dy = np.min(ylim) - y1
if np.max([y0 + dy, y1 + dy]) > np.max(ylim):
if y0 > y1:
dy = np.max(ylim) - y0
else:
dy = np.max(ylim) - y1
return dx, dy
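# Example of the clamping behaviour (a sketch, not part of the widget code):
# for a widget spanning (0, 0)-(2, 1) inside axes limits xlim=(0, 4), ylim=(0, 3),
# a requested shift of dx=5, dy=0 would push the right edge past xlim, so
# axes_boundery_check returns dx=2, dy=0, leaving the widget flush with the border.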
class DraggableResizeableLine(HasTraits):
"""
    Resizable lines based on DraggableResizeableRectangle. The line can be resized by dragging its endpoints and dragged as a whole by its midpoint.
Author: KingKarl, April 2017
"""
lock = None # only one can be animated at a time
updateXY = Int(0)
updateText = Int(0)
released = Int(0)
axes_xlim = None
axes_ylim = None
def __init__(self, line, border_tol=0.15, allow_resize=True,
fixed_aspect_ratio=False):
super(DraggableResizeableLine,self).__init__()
self.line = line
self.border_tol = border_tol
self.press = None
if DraggableResizeableLine.axes_xlim is None:
DraggableResizeableLine.axes_xlim = self.line.axes.get_xlim()
if DraggableResizeableLine.axes_ylim is None:
DraggableResizeableLine.axes_ylim = self.line.axes.get_ylim()
@staticmethod
def reset_borders():
DraggableResizeableLine.axes_xlim = None
DraggableResizeableLine.axes_ylim = None
print('Reset of DraggableResizableLines Border to None')
def connect(self):
'connect to all the events we need'
self.cidpress = self.line.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidrelease = self.line.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.line.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def on_press(self, event):
'on button press we will see if the mouse is over us and store some data'
if self.line.figure.canvas.toolbar._active is not None: return
if event.inaxes != self.line.axes: return
if DraggableResizeableLine.lock is not None: return
if np.abs(self.line.axes.get_xlim()[0]-self.line.axes.get_xlim()[1])>np.abs(DraggableResizeableLine.axes_xlim[0]-DraggableResizeableLine.axes_xlim[1]):
DraggableResizeableLine.axes_xlim = self.line.axes.get_xlim()
if np.abs(self.line.axes.get_ylim()[0]-self.line.axes.get_ylim()[1])>np.abs(DraggableResizeableLine.axes_ylim[0]-DraggableResizeableLine.axes_ylim[1]):
DraggableResizeableLine.axes_ylim = self.line.axes.get_ylim()
x,y = self.line.get_data()
x0, x1 = x
y0, y1 = y
if not self.within_border_tol([x0, y0],[x1, y1],event): return
DraggableResizeableLine.lock = self
self.press = x0, y0, x1, y1, event.xdata, event.ydata
canvas = self.line.figure.canvas
axes = self.line.axes
self.line.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(self.line.axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self.line)
# and blit just the redrawn area
canvas.blit(axes.bbox)
def within_border_tol(self,pos_0, pos_1, event):
x0, y0 = pos_0
x1, y1 = pos_1
xpress, ypress = event.xdata, event.ydata
bt = self.border_tol * (abs(x0-x1)**2 + abs(y0-y1)**2)**0.5
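        # The press counts as a hit if it lies within the border tolerance (scaled by
        # the line length) of either endpoint or of the midpoint of the line.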
if (abs(x0-xpress)**2+abs(y0-ypress)**2)**0.5<2**0.5*abs(bt) or (abs(x1-xpress)**2+abs(y1-ypress)**2)**0.5<2**0.5*abs(bt) or (abs((x0+x1)/2-xpress)**2+abs((y0+y1)/2-ypress)**2)**0.5<2**0.5*abs(bt):
return True
else:
return False
def on_motion(self, event):
'on motion we will move the rect if the mouse is over us'
if DraggableResizeableLine.lock is not self:
return
if event.inaxes != self.line.axes: return
x0, y0, x1, y1, xpress, ypress = self.press
self.dx = event.xdata - xpress
self.dy = event.ydata - ypress
self.update_line()
canvas = self.line.figure.canvas
axes = self.line.axes
# restore the background region
canvas.restore_region(self.background)
# redraw just the current line
axes.draw_artist(self.line)
# blit just the redrawn area
canvas.blit(axes.bbox)
self.updateXY += 1
def on_release(self, event):
'on release we reset the press data'
if DraggableResizeableLine.lock is not self:
return
self.press = None
DraggableResizeableLine.lock = None
# turn off the rect animation property and reset the background
self.line.set_animated(False)
self.background = None
self.updateText +=1
# redraw the full figure
self.line.figure.canvas.draw()
self.released += 1
def disconnect(self):
'disconnect all the stored connection ids'
self.line.figure.canvas.mpl_disconnect(self.cidpress)
self.line.figure.canvas.mpl_disconnect(self.cidrelease)
self.line.figure.canvas.mpl_disconnect(self.cidmotion)
def update_line(self):
x0, y0, x1, y1, xpress, ypress = self.press
bt = self.border_tol * (abs(x0-x1)**2 + abs(y0-y1)**2)**0.5
dx, dy = self.dx, self.dy
dx, dy = axes_boundery_check([x0,y0],[x1,y1],dx,dy,DraggableResizeableLine.axes_xlim,DraggableResizeableLine.axes_ylim)
if (abs(x0-xpress)**2+abs(y0-ypress)**2)**0.5<2**0.5*abs(bt): # Check for if mouse close to start (pos 0) of line
self.line.set_data([x0+dx,x1],[y0+dy,y1])
elif (abs(x1-xpress)**2+abs(y1-ypress)**2)**0.5<2**0.5*abs(bt): # Check for if mouse close to start (pos 1) of line
self.line.set_data([x0,x1+dx],[y0,y1+dy])
elif (abs((x0+x1)/2-xpress)**2+abs((y0+y1)/2-ypress)**2)**0.5<2**0.5*abs(bt): # Make line draggable at center
self.line.set_data([x0+dx,x1+dx],[y0+dy,y1+dy])
class AnnotatedLine(HasTraits):
axes = Instance(matplotlib.axes.Axes)
annotext = Instance(matplotlib.text.Text)
text = Str()
drl = Instance(DraggableResizeableLine)
lineUpdated = Int(0)
lineReleased = Int(0)
    def __init__(self, axes, x0, y0, x1, y1, text, color='k'):  # text, color='c', ecolor='k', alpha=0.7
print("View: Line created")
super(AnnotatedLine, self).__init__()
self.pos_0 = [x0,y0]
self.pos_1 = [x1,y1]
self.axes = axes
self.text = text
line_handle = self.axes.plot([x0,x1],[y0,y1],color = color)[0]
self.line = line_handle
self.drl = DraggableResizeableLine(line_handle)
self.drl.connect()
def disconnect(self):
self.drl.disconnect()
def connect(self):
self.drl.connect()
@on_trait_change('drl.updateText')
def updateText(self):
try:
self.annotext.remove()
except AttributeError:
print("AnnotatedRectangle: Found no annotated text")
x, y = self.line.get_data()
self.pos_0 = np.array([x[0],y[0]])
self.pos_1 = np.array([x[1],y[1]])
self.annotext = self.axes.annotate(self.text, self.pos_1+(self.pos_0-self.pos_1)/2, color='w', weight='bold',fontsize=6, ha='center', va='center')
@on_trait_change('drl.updateXY')
def xyLineUpdated(self):
self.lineUpdated += 1
@on_trait_change('drl.released')
def released(self):
print("AnnotatedRectangle: Rectangle released")
x, y = self.line.get_data()
self.pos_0 = np.array([x[0],y[0]])
self.pos_1 = np.array([x[1],y[1]])
self.lineReleased += 1
def get_pos(self):
return self.pos_0, self.pos_1
def remove(self):
self.disconnect()
self.line.remove()
self.annotext.remove()
del self
class DraggableResizeableRectangle(HasTraits):
"""
Draggable and resizeable rectangle with the animation blit techniques.
Based on example code at
http://matplotlib.sourceforge.net/users/event_handling.html
    If *allow_resize* is *True* the rectangle can be resized by dragging its
    lines. *border_tol* specifies how close the pointer has to be to a line for
    the drag to be considered a resize operation. Dragging is still possible by
    clicking the interior of the rectangle. *fixed_aspect_ratio* determines if
    the rectangle keeps its aspect ratio during resize operations.
"""
updateText = Int(0)
updateXY = Int(0)
released = Int(0)
lock = None # only one can be animated at a time
    axes_xlim = None  # Needed to allow the widget to leave the boundaries of zoomed-in data. Might be unnecessary if matplotlib allowed access to the unzoomed axes limits.
axes_ylim = None
@staticmethod
def reset_borders():
DraggableResizeableRectangle.axes_xlim = None
DraggableResizeableRectangle.axes_ylim = None
print('Reset of DraggableResizableRectangle Border to None')
def __init__(self, rect, border_tol=.15, allow_resize=True,
fixed_aspect_ratio=False):
self.rect = rect
self.border_tol = border_tol
self.allow_resize = allow_resize
self.fixed_aspect_ratio = fixed_aspect_ratio
self.press = None
self.background = None
if DraggableResizeableRectangle.axes_xlim is None:
DraggableResizeableRectangle.axes_xlim = self.rect.axes.get_xlim()
if DraggableResizeableRectangle.axes_ylim is None:
DraggableResizeableRectangle.axes_ylim = self.rect.axes.get_ylim()
def connect(self):
'connect to all the events we need'
self.cidpress = self.rect.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.rect.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.rect.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def on_press(self, event):
'on button press we will see if the mouse is over us and store some data'
if self.rect.figure.canvas.toolbar._active is not None: return
if event.inaxes != self.rect.axes: return
if DraggableResizeableRectangle.lock is not None: return
contains, attrd = self.rect.contains(event)
if not contains: return
if np.abs(self.rect.axes.get_xlim()[0]-self.rect.axes.get_xlim()[1])>np.abs(DraggableResizeableRectangle.axes_xlim[0]-DraggableResizeableRectangle.axes_xlim[1]):
DraggableResizeableRectangle.axes_xlim = self.rect.axes.get_xlim()
if np.abs(self.rect.axes.get_ylim()[0]-self.rect.axes.get_ylim()[1])>np.abs(DraggableResizeableRectangle.axes_ylim[0]-DraggableResizeableRectangle.axes_ylim[1]):
DraggableResizeableRectangle.axes_ylim = self.rect.axes.get_ylim()
x0, y0 = self.rect.xy
w0, h0 = self.rect.get_width(), self.rect.get_height()
aspect_ratio = np.true_divide(w0, h0)
self.press = x0, y0, w0, h0, aspect_ratio, event.xdata, event.ydata
DraggableResizeableRectangle.lock = self
# draw everything but the selected rectangle and store the pixel buffer
canvas = self.rect.figure.canvas
axes = self.rect.axes
self.rect.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(self.rect.axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self.rect)
# and blit just the redrawn area
canvas.blit(axes.bbox)
def on_motion(self, event):
'on motion we will move the rect if the mouse is over us'
if DraggableResizeableRectangle.lock is not self:
return
if event.inaxes != self.rect.axes: return
x0, y0, w0, h0, aspect_ratio, xpress, ypress = self.press
self.dx = event.xdata - xpress
self.dy = event.ydata - ypress
self.update_rect()
canvas = self.rect.figure.canvas
axes = self.rect.axes
# restore the background region
canvas.restore_region(self.background)
# redraw just the current rectangle
axes.draw_artist(self.rect)
# blit just the redrawn area
canvas.blit(axes.bbox)
self.updateXY += 1
def on_release(self, event):
'on release we reset the press data'
if DraggableResizeableRectangle.lock is not self:
return
self.press = None
DraggableResizeableRectangle.lock = None
# turn off the rect animation property and reset the background
self.rect.set_animated(False)
self.background = None
self.updateText += 1
# redraw the full figure
self.rect.figure.canvas.draw()
self.released += 1
def disconnect(self):
'disconnect all the stored connection ids'
self.rect.figure.canvas.mpl_disconnect(self.cidpress)
self.rect.figure.canvas.mpl_disconnect(self.cidrelease)
self.rect.figure.canvas.mpl_disconnect(self.cidmotion)
def update_rect(self):
x0, y0, w0, h0, aspect_ratio, xpress, ypress = self.press
dx, dy = self.dx, self.dy
bt = self.border_tol
fixed_ar = self.fixed_aspect_ratio
dx, dy = axes_boundery_check([x0,y0],[x0+w0,y0+h0],dx,dy,self.rect.axes.get_xlim(),self.rect.axes.get_ylim())
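        # Depending on where the rectangle was grabbed, either drag the whole rectangle
        # (press in the interior, away from the edges) or resize the edge nearest the
        # press point (press within border_tol of that edge), optionally preserving the
        # aspect ratio.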
if (not self.allow_resize or
(abs(x0+np.true_divide(w0,2)-xpress)<np.true_divide(w0,2)-bt*w0 and
abs(y0+np.true_divide(h0,2)-ypress)<np.true_divide(h0,2)-bt*h0)):
self.rect.set_x(x0+dx)
self.rect.set_y(y0+dy)
elif abs(x0-xpress)<bt*w0:
self.rect.set_x(x0+dx)
self.rect.set_width(w0-dx)
if fixed_ar:
dy = np.true_divide(dx, aspect_ratio)
self.rect.set_y(y0+dy)
self.rect.set_height(h0-dy)
elif abs(x0+w0-xpress)<bt*w0:
self.rect.set_width(w0+dx)
if fixed_ar:
dy = np.true_divide(dx, aspect_ratio)
self.rect.set_height(h0+dy)
elif abs(y0-ypress)<bt*h0:
self.rect.set_y(y0+dy)
self.rect.set_height(h0-dy)
if fixed_ar:
dx = dy*aspect_ratio
self.rect.set_x(x0+dx)
self.rect.set_width(w0-dx)
elif abs(y0+h0-ypress)<bt*h0:
self.rect.set_height(h0+dy)
if fixed_ar:
dx = dy*aspect_ratio
self.rect.set_width(w0+dx)
class AnnotatedRectangle(HasTraits):
rectangle = Instance(mpatches.Rectangle)
axes = Instance(matplotlib.axes.Axes)
annotext = Instance(matplotlib.text.Text)
text = Str()
drr = Instance(DraggableResizeableRectangle)
rectUpdated = Int(0)
rectReleased = Int(0)
def __init__(self, axes, x1, y1, x2, y2, text, color='c', ecolor='k', alpha=0.7):
print("View: AnnotatedRectangle created")
super(AnnotatedRectangle, self).__init__()
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
        # Rectangle workaround: ensure x1 <= x2 and y1 <= y2, because the rectangle is not draggable when selected from the top
if x1 > x2:
temp = x2
x2 = x1
x1 = temp
if y1 > y2:
temp = y2
y2 = y1
y1 = temp
xto = x2 - x1
yto = y2 - y1
rectangle = mpatches.Rectangle((x1, y1), xto, yto, ec=ecolor, color=color, alpha=alpha)
self.text = text
self.axes = axes
xtext = self.x1 + (self.x2 - self.x1) / 2.
ytext = self.y1 + (self.y2 - self.y1) / 2.
self.axes.add_patch(rectangle)
self.drr = DraggableResizeableRectangle(rectangle)
self.drr.connect()
self.rectangle = self.drr.rect
@on_trait_change('drr.updateText')
def updateText(self):
try:
self.annotext.remove()
except AttributeError:
print("AnnotatedRectangle: Found no annotated text")
x1, y1 = self.drr.rect.get_xy()
x2 = x1 + self.drr.rect.get_width()/2.0
y2 = y1 + self.drr.rect.get_height()/2.0
self.annotext = self.axes.annotate(self.text, (x2, y2), color='w', weight='bold',
fontsize=6, ha='center', va='center')
@on_trait_change('drr.updateXY')
def xyRectUpdated(self):
self.rectUpdated += 1
@on_trait_change('drr.released')
def released(self):
print("AnnotatedRectangle: Rectangle released")
self.x1, self.y1 = self.drr.rect.get_xy()
self.x2 = self.x1+self.drr.rect.get_width()
        self.y2 = self.y1 + self.drr.rect.get_height()
self.rectReleased += 1
def remove(self):
self.disconnect()
self.rectangle.remove()
self.annotext.remove()
del self
def get_rect_xy(self):
return self.drr.rect.get_xy()
def get_rect_width(self):
return self.drr.rect.get_width()
def get_rect_height(self):
return self.drr.rect.get_height()
def disconnect(self):
self.drr.disconnect()
def connect(self):
self.drr.connect()
if __name__ == '__main__':
p = mpatches.Rectangle((0.5, 0.5), 0.2, 0.2, ec="k", color='c', alpha=0.7)
d = DraggableResizeableRectangle(p)
d.configure_traits() | mit |
kayak/fireant | fireant/tests/widgets/test_pandas.py | 2 | 30582 | import copy
from functools import partial
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing
from pypika import Table
from pypika.analytics import Sum
from fireant import DataSet, DataType, Field, Rollup
from fireant.tests.dataset.mocks import (
CumSum,
ElectionOverElection,
dimx0_metricx1_df,
dimx0_metricx2_df,
dimx1_date_df,
dimx1_date_operation_df,
dimx1_num_df,
dimx1_str_df,
dimx2_date_str_df,
dimx2_date_str_ref_df,
mock_dataset,
no_index_df,
test_database,
)
from fireant.utils import alias_selector as f
from fireant.widgets.pandas import Pandas
def format_float(x, is_raw=False):
if pd.isnull(x):
return ''
if x in [np.inf, -np.inf]:
return 'Inf'
return f'{x:.0f}' if is_raw else f'{x:,.0f}'
format_float_raw = partial(format_float, is_raw=True)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class PandasTransformerTests(TestCase):
maxDiff = None
def test_metricx1(self):
result = Pandas(mock_dataset.fields.votes).transform(dimx0_metricx1_df, [], [])
expected = dimx0_metricx1_df.copy()[[f('votes')]]
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2(self):
result = Pandas(mock_dataset.fields.votes, mock_dataset.fields.wins).transform(dimx0_metricx2_df, [], [])
expected = dimx0_metricx2_df.copy()[[f('votes'), f('wins')]]
expected.columns = ['Votes', 'Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_reversed(self):
result = Pandas(mock_dataset.fields.wins, mock_dataset.fields.votes).transform(dimx0_metricx2_df, [], [])
expected = dimx0_metricx2_df.copy()[[f('wins'), f('votes')]]
expected.columns = ['Wins', 'Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_date(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_date_df, [mock_dataset.fields.timestamp], [])
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_date_with_operation(self):
result = Pandas(CumSum(mock_dataset.fields.votes)).transform(
dimx1_date_operation_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_operation_df.copy()[[f('cumsum(votes)')]]
expected.index.names = ['Timestamp']
expected.columns = ['CumSum(Votes)']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_str(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_str_df, [mock_dataset.fields.political_party], [])
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx1_int(self):
result = Pandas(mock_dataset.fields.wins).transform(dimx1_num_df, [mock_dataset.fields['candidate-id']], [])
expected = dimx1_num_df.copy()[[f('wins')]]
expected.index.names = ['Candidate ID']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
result = Pandas(mock_dataset.fields.wins).transform(dimx2_date_str_df, dimensions, [])
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_transpose_dimx2_str(self):
result = Pandas(mock_dataset.fields.wins, transpose=True).transform(
dimx1_str_df, [mock_dataset.fields.political_party], []
)
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.transpose()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx1_str_transposes_data_frame(self):
result = Pandas(mock_dataset.fields.wins, pivot=[mock_dataset.fields.political_party]).transform(
dimx1_str_df, [mock_dataset.fields.political_party], []
)
expected = dimx1_str_df.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.transpose()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str(self):
result = Pandas(mock_dataset.fields.wins, pivot=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
result = Pandas(mock_dataset.fields.wins, hide=[mock_dataset.fields.political_party]).transform(
dimx2_date_str_df, dimensions, []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.reset_index('$political_party', inplace=True, drop=True)
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_metric_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes, hide=[mock_dataset.fields.votes]).transform(
dimx2_date_str_ref_df, dimensions, references
)
expected = dimx2_date_str_ref_df.copy()[[f('votes_eoe')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes EoE']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_hidden_ref_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes, hide=['votes_eoe']).transform(
dimx2_date_str_ref_df, dimensions, references
)
expected = dimx2_date_str_ref_df.copy()[[f('votes')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_fetch_only_dimx2_date_str(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
dimensions[1].fetch_only = True
result = Pandas(mock_dataset.fields.wins).transform(dimx2_date_str_df, dimensions, [])
dimensions[1].fetch_only = False
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.reset_index('$political_party', inplace=True, drop=True)
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_time_series_ref(self):
dimensions = [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
references = [ElectionOverElection(mock_dataset.fields.timestamp)]
result = Pandas(mock_dataset.fields.votes).transform(dimx2_date_str_ref_df, dimensions, references)
expected = dimx2_date_str_ref_df.copy()[[f('votes'), f('votes_eoe')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Votes', 'Votes EoE']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metric_format(self):
import copy
votes = copy.copy(mock_dataset.fields.votes)
votes.prefix = '$'
votes.suffix = '€'
votes.precision = 2
# divide the data frame by 3 to get a repeating decimal so we can check precision
result = Pandas(votes).transform(dimx1_date_df / 3, [mock_dataset.fields.timestamp], [])
f_votes = f('votes')
expected = dimx1_date_df.copy()[[f_votes]]
expected[f_votes] = ['${0:,.2f}€'.format(x) for x in expected[f_votes] / 3]
expected.index.names = ['Timestamp']
expected.columns = ['Votes']
expected.columns.name = 'Metrics'
pandas.testing.assert_frame_equal(expected, result)
def test_nan_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.nan
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_inf_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.inf
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_neginf_in_metrics(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.inf
result = Pandas(mock_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_inf_in_metrics_with_precision_zero(self):
cat_dim_df_with_nan = dimx1_str_df.copy()
cat_dim_df_with_nan['$wins'] = cat_dim_df_with_nan['$wins'].apply(float)
cat_dim_df_with_nan.iloc[2, 1] = np.inf
mock_modified_dataset = copy.deepcopy(mock_dataset)
mock_modified_dataset.fields.wins.precision = 0
result = Pandas(mock_modified_dataset.fields.wins).transform(
cat_dim_df_with_nan, [mock_modified_dataset.fields.political_party], []
)
expected = cat_dim_df_with_nan.copy()[[f('wins')]]
expected.index = pd.Index(['Democrat', 'Independent', 'Republican'], name='Party')
expected['$wins'] = ['6', '0', 'Inf']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
pandas.testing.assert_frame_equal(expected, result)
class PandasTransformerSortTests(TestCase):
def test_metricx2_sort_index_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_index()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_index_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0], ascending=[False]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_index(ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_value_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_values(['Wins'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_value_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1], ascending=[False]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.sort_values(['Wins'], ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_metricx2_sort_index_and_value(self):
result = Pandas(mock_dataset.fields.wins, sort=[-0, 1]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = (
expected.reset_index().sort_values(['Timestamp', 'Wins'], ascending=[True, False]).set_index('Timestamp')
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.sort_index()
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0], ascending=[False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.sort_index(ascending=False)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_first_metric_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat']).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_metric_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1], ascending=[False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=False).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_metric_asc(self):
result = Pandas(mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[1]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=True).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx1_metricx2(self):
result = Pandas(
mock_dataset.fields.votes, mock_dataset.fields.wins, pivot=[mock_dataset.fields.timestamp]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes'), f('wins')]]
expected = expected.unstack(level=0)
expected.index.names = ['Party']
expected.columns = pd.MultiIndex.from_product(
[
['Votes', 'Wins'],
pd.DatetimeIndex(['1996-01-01', '2000-01-01', '2004-01-01', '2008-01-01', '2012-01-01', '2016-01-01']),
],
names=['Metrics', 'Timestamp'],
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_second_metric_desc(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=1, ascending=False
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Democrat'], ascending=False).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_with_sort_index_and_columns(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[True, False]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = (
expected.reset_index()
.sort_values(['Timestamp', 'Democrat'], ascending=[True, False])
.set_index('Timestamp')
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_use_first_value_for_ascending_when_arg_has_invalid_length(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[True]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Timestamp', 'Democrat'], ascending=True).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_use_pandas_default_for_ascending_when_arg_empty_list(self):
result = Pandas(
mock_dataset.fields.votes, pivot=[mock_dataset.fields.political_party], sort=[0, 2], ascending=[]
).transform(dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], [])
expected = dimx2_date_str_df.copy()[[f('votes')]]
expected = expected.unstack(level=[1])
expected.index.names = ['Timestamp']
expected.columns = ['Democrat', 'Independent', 'Republican']
expected.columns.names = ['Party']
expected = expected.reset_index().sort_values(['Timestamp', 'Democrat'], ascending=None).set_index('Timestamp')
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str_sort_index_level_0_default_ascending(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Timestamp']).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_dimx2_date_str_sort_index_level_0_asc(self):
result = Pandas(mock_dataset.fields.wins, sort=[0], ascending=True).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Timestamp'], ascending=True).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_sort_index_level_1_desc(self):
result = Pandas(mock_dataset.fields.wins, sort=[1], ascending=[False]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.reset_index().sort_values(['Party'], ascending=[False]).set_index(['Timestamp', 'Party'])
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_dimx2_date_str_sort_index_and_values(self):
result = Pandas(mock_dataset.fields.wins, sort=[0, 2], ascending=[False, True]).transform(
dimx2_date_str_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party], []
)
expected = dimx2_date_str_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp', 'Party']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = (
expected.reset_index()
.sort_values(['Timestamp', 'Wins'], ascending=[False, True])
.set_index(['Timestamp', 'Party'])
)
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_empty_sort_array_is_ignored(self):
result = Pandas(mock_dataset.fields.wins, sort=[]).transform(dimx1_date_df, [mock_dataset.fields.timestamp], [])
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_sort_value_greater_than_number_of_columns_is_ignored(self):
result = Pandas(mock_dataset.fields.wins, sort=[5]).transform(
dimx1_date_df, [mock_dataset.fields.timestamp], []
)
expected = dimx1_date_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_sort_with_no_index(self):
result = Pandas(mock_dataset.fields.wins, sort=[0]).transform(no_index_df, [mock_dataset.fields.timestamp], [])
expected = no_index_df.copy()[[f('wins')]]
expected.index.names = ['Timestamp']
expected.columns = ['Wins']
expected.columns.name = 'Metrics'
expected = expected.applymap(format_float)
pandas.testing.assert_frame_equal(expected, result)
def test_pivoted_df_transformation_formats_totals_correctly(self):
test_table = Table('test')
ds = DataSet(
table=test_table,
database=test_database,
fields=[
Field('date', label='Date', definition=test_table.date, data_type=DataType.date),
Field('locale', label='Locale', definition=test_table.locale, data_type=DataType.text),
Field('company', label='Company', definition=test_table.text, data_type=DataType.text),
Field('metric1', label='Metric1', definition=Sum(test_table.number), data_type=DataType.number),
Field('metric2', label='Metric2', definition=Sum(test_table.number), data_type=DataType.number),
],
)
df = pd.DataFrame.from_dict(
{
'$metric1': {('~~totals', '~~totals'): 3, ('za', '~~totals'): 3, ('za', 'C1'): 2, ('za', 'C2'): 1},
'$metric2': {('~~totals', '~~totals'): 4, ('za', '~~totals'): 4, ('za', 'C1'): 2, ('za', 'C2'): 2},
}
)
df.index.names = [f(ds.fields.locale.alias), f(ds.fields.company.alias)]
result = Pandas(ds.fields.metric1, ds.fields.metric2, pivot=[ds.fields.company]).transform(
df, [Rollup(ds.fields.locale), Rollup(ds.fields.company)], [], use_raw_values=True
)
self.assertEqual(['Metrics', 'Company'], list(result.columns.names))
self.assertEqual(
[
('Metric1', 'C1'),
('Metric1', 'C2'),
('Metric1', 'Totals'),
('Metric2', 'C1'),
('Metric2', 'C2'),
('Metric2', 'Totals'),
],
result.columns.values.tolist(),
)
self.assertEqual(['Locale'], list(result.index.names))
self.assertEqual(['za', 'Totals'], result.index.values.tolist())
self.assertEqual([['2', '1', '3', '2', '2', '4'], ['', '', '3', '', '', '4']], result.values.tolist())
| apache-2.0 |
massmutual/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
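    # A minimal sketch of one possible solution (hedged): the vectorizer
    # thresholds, the C value and the n-gram grid below are illustrative
    # choices, not the tutorial's official solution file.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    # grid search over unigrams vs. unigrams + bigrams
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each parameter set explored by the grid search
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
    # predict the outcome on the testing set
    y_predicted = grid_search.predict(docs_test)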
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
brguez/TEIBA | src/python/srcElements_activity_swarmplot.py | 1 | 3500 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
## Graphic style ##
sns.set_style("white")
sns.set_style("ticks")
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('activity', help='')
parser.add_argument('sortedSrc', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
activity = args.activity
sortedSrc = args.sortedSrc
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "activity: ", activity
print "sortedSrc: ", sortedSrc
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1.
##########################
## Generate a list of tuples with the following format:
# - (cytobandId, nbTransductions): one tuple per donor in which the source element is active
# Do not consider 0 cases.
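# The activity file is assumed to be a tab-separated matrix (inferred from the
# parsing below): the header row lists donor IDs and every following row gives
# a cytoband ID followed by the number of transductions per donor.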
# Make donor Ids list
activityFile = open(activity, 'r')
donorIdList = activityFile.readline().rstrip().split("\t")
donorIdList = donorIdList[1:] # remove first list element
tupleList = []
for line in activityFile:
nbTransductionsList = line.rstrip().split("\t")
cytobandId = nbTransductionsList.pop(0)
    ## For each donor, read the source element activity
for donorIndex, donorId in enumerate(donorIdList):
nbTransductions = int(nbTransductionsList[donorIndex])
        # Skip donors in which the source element is not active
if (nbTransductions != 0):
Tuple = (cytobandId, nbTransductions)
tupleList.append(Tuple)
## Convert into dataframe
df = pd.DataFrame(tupleList)
df.columns = ['cytobandId', 'nbTransductions']
#print dataframe
## Make source elements list
sortedSrcFile = open(sortedSrc, 'r')
sourceElementOrder = []
for line in sortedSrcFile:
colList = line.rstrip().split("\t")
cytobandId = colList[0]
sourceElementOrder.append(cytobandId)
##### Make plot
#################
fig = plt.figure(figsize=(25,5))
#ax = sns.swarmplot(x='cytobandId', y='nbTransductions', data=hotL1Df, size=3, edgecolor="gray", order=sourceElementOrder)
ax = sns.swarmplot(x='cytobandId', y='nbTransductions', data=df, size=3, edgecolor="gray", order=sourceElementOrder)
### Axis labels
ax.set_xlabel('')
ax.set_ylabel('# transductions')
# turn the axis labels
for item in ax.get_yticklabels():
item.set_rotation(0)
for item in ax.get_xticklabels():
item.set_rotation(90)
## Y ticks
ax.set(yticks=np.arange(0,91,10))
## Save figure
fileName = outDir + "/test.pdf"
plt.savefig(fileName)
####
header("Finished")
| gpl-3.0 |
lnls-sirius/dev-packages | siriuspy/siriuspy/machshift/utils.py | 1 | 9933 | """Machine shift utils."""
import re as _re
import copy as _copy
from datetime import datetime as _datetime
import numpy as _np
from matplotlib import pyplot as _plt
from .. import util as _util
from .. import clientweb as _web
class MacScheduleData:
"""Machine schedule data."""
_TAG_FORMAT_BEG = r'(\d+)h(\d+)-(\w)-(\d+\.\d)'
_TAG_FORMAT_END = r'(\d+)h(\d+)-(\w)'
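    # Illustrative tag shapes, inferred from the regexes and the parser below:
    # a beginning tag such as '8h00-B-100.0' carries hour, minute, flag and the
    # programmed initial current, while an end tag such as '18h00-E' carries
    # only hour, minute and flag ('E' marks the end of a users shift).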
_mac_schedule_sdata = dict()
_mac_schedule_ndata_byshift = dict()
_mac_schedule_ndata_byday = dict()
_mac_schedule_ndata_inicurr = dict()
@staticmethod
def get_mac_schedule_data(year, formating='plain'):
"""Get machine schedule data for year."""
MacScheduleData._reload_mac_schedule_data(year)
if formating == 'plain':
data = MacScheduleData._mac_schedule_sdata[year]
mac_schedule = _copy.deepcopy(data)
elif formating == 'numeric_byshift':
data = MacScheduleData._mac_schedule_ndata_byshift[year]
mac_schedule = list(zip(*data))
elif formating == 'numeric_byday':
data = MacScheduleData._mac_schedule_ndata_byday[year]
mac_schedule = list(zip(*data))
else:
raise NotImplementedError(
"machine schedule for formating '{}' "
"is not defined".format(formating))
return mac_schedule
@staticmethod
def get_users_shift_count(begin, end):
"""Get users shift count for a period."""
begin, end = MacScheduleData._handle_interval_data(begin, end)
_, tags = MacScheduleData._get_numeric_data_for_interval(
begin, end, dtype='macsched_byshift')
return _np.sum(tags) if begin != end else 0
@staticmethod
def get_users_shift_day_count(begin, end):
"""Get users shift day count for a period."""
begin, end = MacScheduleData._handle_interval_data(begin, end)
_, tags = MacScheduleData._get_numeric_data_for_interval(
begin, end, dtype='macsched_byday')
return _np.sum(tags) if begin != end else 0
@staticmethod
def is_user_shift_programmed(
timestamp=None, datetime=None,
year=None, month=None, day=None, hour=0, minute=0):
"""Return whether a day is a predefined user shift."""
timestamp, datetime, ret_uni = MacScheduleData._handle_timestamp_data(
timestamp, datetime, year, month, day, hour, minute)
times, tags = MacScheduleData._get_numeric_data_for_interval(
datetime[0], datetime[-1], dtype='macsched_byshift')
val = _interp1d_previous(times, tags, timestamp)
return bool(val) if ret_uni else val
@staticmethod
def get_initial_current_programmed(
timestamp=None, datetime=None,
year=None, month=None, day=None, hour=0, minute=0):
"""Return initial current for shift."""
timestamp, datetime, ret_uni = MacScheduleData._handle_timestamp_data(
timestamp, datetime, year, month, day, hour, minute)
times, currs = MacScheduleData._get_numeric_data_for_interval(
datetime[0], datetime[-1], dtype='initial_current')
val = _interp1d_previous(times, currs, timestamp)
return val[0] if ret_uni else val
@staticmethod
def plot_mac_schedule(year):
"""Get machine schedule data for year."""
MacScheduleData._reload_mac_schedule_data(year)
times, tags = MacScheduleData.get_mac_schedule_data(
year, formating='numeric_byshift')
days_of_year = len(MacScheduleData._mac_schedule_sdata[year])
new_timestamp = _np.linspace(times[0], times[-1], days_of_year*24*60)
new_datetimes = [_datetime.fromtimestamp(ts) for ts in new_timestamp]
new_tags = _interp1d_previous(times, tags, new_timestamp)
fig = _plt.figure()
_plt.plot_date(new_datetimes, new_tags, '-')
_plt.title('Machine Schedule - ' + str(year))
return fig
# --- private methods ---
@staticmethod
def _reload_mac_schedule_data(year):
if year in MacScheduleData._mac_schedule_sdata:
return
if not _web.server_online():
raise Exception('could not connect to web server')
try:
data, _ = _util.read_text_data(_web.mac_schedule_read(year))
except Exception:
print('No data provided for year ' + str(year) + '. '
'Getting template data.')
data, _ = _util.read_text_data(_web.mac_schedule_read('template'))
databyshift = list()
databyday = list()
datainicurr = list()
for datum in data:
if len(datum) < 2:
raise Exception(
'there is a date ({0}) with problem in {1} '
'machine schedule'.format(datum, year))
month, day = int(datum[0]), int(datum[1])
if len(datum) == 2:
timestamp = _datetime(year, month, day, 0, 0).timestamp()
databyshift.append((timestamp, 0))
databyday.append((timestamp, 0))
datainicurr.append((timestamp, 0.0))
else:
timestamp = _datetime(year, month, day, 0, 0).timestamp()
databyday.append((timestamp, 1))
for tag in datum[2:]:
if 'B' in tag:
hour, minute, flag, inicurr = _re.findall(
MacScheduleData._TAG_FORMAT_BEG, tag)[0]
inicurr = float(inicurr)
else:
hour, minute, flag = _re.findall(
MacScheduleData._TAG_FORMAT_END, tag)[0]
inicurr = 0.0
flag_bit = 0 if flag == 'E' else 1
hour, minute = int(hour), int(minute)
timestamp = _datetime(
year, month, day, hour, minute).timestamp()
databyshift.append((timestamp, flag_bit))
datainicurr.append((timestamp, inicurr))
MacScheduleData._mac_schedule_sdata[year] = data
MacScheduleData._mac_schedule_ndata_byshift[year] = databyshift
MacScheduleData._mac_schedule_ndata_byday[year] = databyday
MacScheduleData._mac_schedule_ndata_inicurr[year] = datainicurr
@staticmethod
def _handle_timestamp_data(
timestamp=None, datetime=None, year=None,
month=None, day=None, hour=0, minute=0):
ret_uni = False
if timestamp is not None:
if not isinstance(timestamp, (list, tuple, _np.ndarray)):
timestamp = [timestamp, ]
ret_uni = True
datetime = [_datetime.fromtimestamp(ts) for ts in timestamp]
elif datetime is not None:
if not isinstance(datetime, (list, tuple, _np.ndarray)):
datetime = [datetime, ]
ret_uni = True
timestamp = [dt.timestamp() for dt in datetime]
elif year is not None:
ret_uni = True
datetime = [_datetime(year, month, day, hour, minute), ]
timestamp = [dt.timestamp() for dt in datetime]
else:
raise Exception(
'Enter timestamp, datetime or datetime items data.')
return timestamp, datetime, ret_uni
@staticmethod
def _handle_interval_data(begin, end):
if isinstance(begin, float):
begin = _datetime.fromtimestamp(begin)
end = _datetime.fromtimestamp(end)
elif isinstance(begin, dict):
begin = _datetime(**begin)
end = _datetime(**end)
return begin, end
@staticmethod
def _get_numeric_data_for_interval(begin, end, dtype='macsched_byshift'):
times, tags = list(), list()
for y2l in _np.arange(begin.year, end.year+1):
MacScheduleData._reload_mac_schedule_data(y2l)
if dtype == 'macsched_byshift':
data = MacScheduleData._mac_schedule_ndata_byshift[y2l]
elif dtype == 'macsched_byday':
data = MacScheduleData._mac_schedule_ndata_byday[y2l]
elif dtype == 'initial_current':
data = MacScheduleData._mac_schedule_ndata_inicurr[y2l]
ytim, ytag = list(zip(*data))
times.extend(ytim)
tags.extend(ytag)
times, tags = _np.array(times), _np.array(tags)
if begin != end:
idcs = _np.where(_np.logical_and(
times >= begin.timestamp(), times <= end.timestamp()))[0]
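            # If no samples fall inside [begin, end], fall back to the last
            # sample before 'begin'; otherwise make sure the sample preceding
            # the interval is also included so interpolation covers its start.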
if not idcs.size:
idcs = _np.searchsorted(times, [begin.timestamp(), ])
idcs = idcs-1 if idcs[0] != 0 else idcs
elif idcs[0] != 0:
idcs = _np.r_[idcs[0]-1, idcs]
return times[idcs], tags[idcs]
return times, tags
# This solution is a simplified version of scipy.interpolate.interp1d for
# interpolation of kind 'previous' with fill_value='extrapolate' option
def _interp1d_previous(x_org, y_org, x_new):
"""interp1d to previous."""
x_new = _np.asarray(x_new)
x_org = _np.asarray(x_org).ravel()
y_org = _np.asarray(y_org)
# Get index of left value
x_new_indices = _np.searchsorted(
_np.nextafter(x_org, -_np.inf), x_new, side='left')
# Clip x_new_indices so that they are within the range of x_org indices.
x_new_indices = x_new_indices.clip(1, len(x_org)).astype(_np.intp)
# Calculate the actual value for each entry in x_new.
y_new = y_org[x_new_indices-1]
return y_new
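# Example (illustrative): _interp1d_previous([0, 10, 20], [1, 2, 3], [5, 15])
# returns array([1, 2]), i.e. each new point takes the value of the closest
# original point to its left.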
# Version using scipy.interpolate.interp1d
# from scipy.interpolate import interp1d as _interp1d
# def _interp1d_previous(x_org, y_org, x_new):
# """interp1d to previous."""
# fun = _interp1d(x_org, y_org, 'previous', fill_value='extrapolate')
# y_new = fun(x_new)
# return y_new
| gpl-3.0 |
dirkcgrunwald/RTLSDR-Scanner | src/dialogs_file.py | 1 | 40613 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import Queue
import cPickle
import os
from PIL import Image
from matplotlib import mlab, patheffects
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.ticker import ScalarFormatter
import numpy
from wx import grid
import wx
from wx.grid import GridCellDateTimeRenderer
from wx.lib.masked.numctrl import NumCtrl, EVT_NUM
from constants import SAMPLE_RATE, TUNER
from events import Event
from file import export_image, File
from misc import format_time
from panels import PanelColourBar
from plot_line import Plotter
from spectrum import Extent, sort_spectrum, count_points
from utils_mpl import get_colours, create_heatmap
from utils_wx import ValidatorCoord
from widgets import TickCellRenderer
class DialogProperties(wx.Dialog):
def __init__(self, parent, scanInfo):
wx.Dialog.__init__(self, parent, title="Scan Properties")
self.scanInfo = scanInfo
box = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(0, 0)
boxScan = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Scan"),
wx.HORIZONTAL)
gridScan = wx.GridBagSizer(0, 0)
textDesc = wx.StaticText(self, label="Description")
gridScan.Add(textDesc, (0, 0), (1, 1), wx.ALL, 5)
self.textCtrlDesc = wx.TextCtrl(self, value=scanInfo.desc,
style=wx.TE_MULTILINE)
gridScan.Add(self.textCtrlDesc, (0, 1), (2, 2), wx.ALL | wx.EXPAND, 5)
textStart = wx.StaticText(self, label="Start")
gridScan.Add(textStart, (2, 0), (1, 1), wx.ALL, 5)
textCtrlStart = wx.TextCtrl(self, value="Unknown",
style=wx.TE_READONLY)
if scanInfo.start is not None:
textCtrlStart.SetValue(str(scanInfo.start))
gridScan.Add(textCtrlStart, (2, 1), (1, 1), wx.ALL, 5)
textMHz1 = wx.StaticText(self, wx.ID_ANY, label="MHz")
gridScan.Add(textMHz1, (2, 2), (1, 1), wx.ALL, 5)
textStop = wx.StaticText(self, label="Stop")
gridScan.Add(textStop, (3, 0), (1, 1), wx.ALL, 5)
textCtrlStop = wx.TextCtrl(self, value="Unknown",
style=wx.TE_READONLY)
if scanInfo.stop is not None:
textCtrlStop.SetValue(str(scanInfo.stop))
gridScan.Add(textCtrlStop, (3, 1), (1, 1), wx.ALL, 5)
textMHz2 = wx.StaticText(self, label="MHz")
gridScan.Add(textMHz2, (3, 2), (1, 1), wx.ALL, 5)
textDwell = wx.StaticText(self, label="Dwell")
gridScan.Add(textDwell, (4, 0), (1, 1), wx.ALL, 5)
textCtrlDwell = wx.TextCtrl(self, value="Unknown",
style=wx.TE_READONLY)
if scanInfo.dwell is not None:
textCtrlDwell.SetValue(str(scanInfo.dwell))
gridScan.Add(textCtrlDwell, (4, 1), (1, 1), wx.ALL, 5)
textSeconds = wx.StaticText(self, label="seconds")
gridScan.Add(textSeconds, (4, 2), (1, 1), wx.ALL, 5)
textNfft = wx.StaticText(self, label="FFT Size")
gridScan.Add(textNfft, (5, 0), (1, 1), wx.ALL, 5)
textCtrlNfft = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.nfft is not None:
textCtrlNfft.SetValue(str(scanInfo.nfft))
gridScan.Add(textCtrlNfft, (5, 1), (1, 1), wx.ALL, 5)
textRbw = wx.StaticText(self, label="RBW")
gridScan.Add(textRbw, (6, 0), (1, 1), wx.ALL, 5)
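        # resolution bandwidth in kHz: the FFT bin width (sample rate / FFT
        # size) scaled to kHz and doubled, presumably to account for the
        # window's effective noise bandwidth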
rbw = ((SAMPLE_RATE / scanInfo.nfft) / 1000.0) * 2.0
textCtrlStop = wx.TextCtrl(self, value="{0:.3f}".format(rbw),
style=wx.TE_READONLY)
gridScan.Add(textCtrlStop, (6, 1), (1, 1), wx.ALL, 5)
textKHz = wx.StaticText(self, label="kHz")
gridScan.Add(textKHz, (6, 2), (1, 1), wx.ALL, 5)
textTime = wx.StaticText(self, label="First scan")
gridScan.Add(textTime, (7, 0), (1, 1), wx.ALL, 5)
textCtrlTime = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.timeFirst is not None:
textCtrlTime.SetValue(format_time(scanInfo.timeFirst, True))
gridScan.Add(textCtrlTime, (7, 1), (1, 1), wx.ALL, 5)
textTime = wx.StaticText(self, label="Last scan")
gridScan.Add(textTime, (8, 0), (1, 1), wx.ALL, 5)
textCtrlTime = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.timeLast is not None:
textCtrlTime.SetValue(format_time(scanInfo.timeLast, True))
gridScan.Add(textCtrlTime, (8, 1), (1, 1), wx.ALL, 5)
textLat = wx.StaticText(self, label="Latitude")
gridScan.Add(textLat, (9, 0), (1, 1), wx.ALL, 5)
self.textCtrlLat = wx.TextCtrl(self, value="Unknown")
self.textCtrlLat.SetValidator(ValidatorCoord(True))
if scanInfo.lat is not None:
self.textCtrlLat.SetValue(str(scanInfo.lat))
gridScan.Add(self.textCtrlLat, (9, 1), (1, 1), wx.ALL, 5)
textLon = wx.StaticText(self, label="Longitude")
gridScan.Add(textLon, (10, 0), (1, 1), wx.ALL, 5)
self.textCtrlLon = wx.TextCtrl(self, value="Unknown")
self.textCtrlLon.SetValidator(ValidatorCoord(False))
if scanInfo.lon is not None:
self.textCtrlLon.SetValue(str(scanInfo.lon))
gridScan.Add(self.textCtrlLon, (10, 1), (1, 1), wx.ALL, 5)
boxScan.Add(gridScan, 0, 0, 5)
grid.Add(boxScan, (0, 0), (1, 1), wx.ALL | wx.EXPAND, 5)
boxDevice = wx.StaticBoxSizer(wx.StaticBox(self, label="Device"),
wx.VERTICAL)
gridDevice = wx.GridBagSizer(0, 0)
textName = wx.StaticText(self, label="Name")
gridDevice.Add(textName, (0, 0), (1, 1), wx.ALL, 5)
textCtrlName = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.name is not None:
textCtrlName.SetValue(scanInfo.name)
gridDevice.Add(textCtrlName, (0, 1), (1, 2), wx.ALL | wx.EXPAND, 5)
textTuner = wx.StaticText(self, label="Tuner")
gridDevice.Add(textTuner, (1, 0), (1, 1), wx.ALL, 5)
textCtrlTuner = wx.TextCtrl(self, value="Unknown",
style=wx.TE_READONLY)
if scanInfo.tuner != -1:
textCtrlTuner.SetValue(TUNER[scanInfo.tuner])
gridDevice.Add(textCtrlTuner, (1, 1), (1, 2), wx.ALL | wx.EXPAND, 5)
testGain = wx.StaticText(self, label="Gain")
gridDevice.Add(testGain, (2, 0), (1, 1), wx.ALL, 5)
textCtrlGain = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.gain is not None:
textCtrlGain.SetValue(str(scanInfo.gain))
gridDevice.Add(textCtrlGain, (2, 1), (1, 1), wx.ALL, 5)
textDb = wx.StaticText(self, label="dB")
gridDevice.Add(textDb, (2, 2), (1, 1), wx.ALL, 5)
textLo = wx.StaticText(self, label="LO")
gridDevice.Add(textLo, (3, 0), (1, 1), wx.ALL, 5)
textCtrlLo = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.lo is not None:
textCtrlLo.SetValue(str(scanInfo.lo))
gridDevice.Add(textCtrlLo, (3, 1), (1, 1), wx.ALL, 5)
textMHz3 = wx.StaticText(self, label="MHz")
gridDevice.Add(textMHz3, (3, 2), (1, 1), wx.ALL, 5)
textCal = wx.StaticText(self, label="Calibration")
gridDevice.Add(textCal, (4, 0), (1, 1), wx.ALL, 5)
textCtrlCal = wx.TextCtrl(self, value="Unknown", style=wx.TE_READONLY)
if scanInfo.calibration is not None:
textCtrlCal.SetValue(str(scanInfo.calibration))
gridDevice.Add(textCtrlCal, (4, 1), (1, 1), wx.ALL, 5)
testPpm = wx.StaticText(self, label="ppm")
gridDevice.Add(testPpm, (4, 2), (1, 1), wx.ALL, 5)
boxDevice.Add(gridDevice, 1, wx.EXPAND, 5)
grid.Add(boxDevice, (1, 0), (1, 1), wx.ALL | wx.EXPAND, 5)
box.Add(grid, 1, wx.ALL | wx.EXPAND, 5)
sizerButtons = wx.StdDialogButtonSizer()
buttonOk = wx.Button(self, wx.ID_OK)
buttonCancel = wx.Button(self, wx.ID_CANCEL)
sizerButtons.AddButton(buttonOk)
sizerButtons.AddButton(buttonCancel)
sizerButtons.Realize()
self.Bind(wx.EVT_BUTTON, self.__on_ok, buttonOk)
box.Add(sizerButtons, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.SetSizerAndFit(box)
def __on_ok(self, _event):
self.scanInfo.desc = self.textCtrlDesc.GetValue()
if self.Validate():
lat = self.textCtrlLat.GetValue()
if len(lat) == 0 or lat == "-" or lat.lower() == "unknown":
self.scanInfo.lat = None
else:
self.scanInfo.lat = float(lat)
lon = self.textCtrlLon.GetValue()
if len(lon) == 0 or lon == "-" or lon.lower() == "unknown":
self.scanInfo.lon = None
else:
self.scanInfo.lon = float(lon)
self.EndModal(wx.ID_CLOSE)
class DialogImageSize(wx.Dialog):
def __init__(self, parent, settings, onlyDpi=False):
wx.Dialog.__init__(self, parent=parent, title='Image settings')
self.settings = settings
textWidth = wx.StaticText(self, label="Width (inches)")
self.ctrlWidth = NumCtrl(self, integerWidth=2, fractionWidth=1)
self.ctrlWidth.SetValue(settings.exportWidth)
self.Bind(EVT_NUM, self.__update_size, self.ctrlWidth)
textHeight = wx.StaticText(self, label="Height (inches)")
self.ctrlHeight = NumCtrl(self, integerWidth=2, fractionWidth=1)
self.ctrlHeight.SetValue(settings.exportHeight)
self.Bind(EVT_NUM, self.__update_size, self.ctrlHeight)
textDpi = wx.StaticText(self, label="Dots per inch")
self.spinDpi = wx.SpinCtrl(self)
self.spinDpi.SetRange(32, 3200)
self.spinDpi.SetValue(settings.exportDpi)
self.Bind(wx.EVT_SPINCTRL, self.__update_size, self.spinDpi)
textSize = wx.StaticText(self, label='Size')
self.textSize = wx.StaticText(self)
self.__update_size(None)
sizerButtons = wx.StdDialogButtonSizer()
buttonOk = wx.Button(self, wx.ID_OK)
buttonCancel = wx.Button(self, wx.ID_CANCEL)
sizerButtons.AddButton(buttonOk)
sizerButtons.AddButton(buttonCancel)
sizerButtons.Realize()
self.Bind(wx.EVT_BUTTON, self.__on_ok, buttonOk)
sizer = wx.GridBagSizer(5, 5)
sizer.Add(textWidth, pos=(0, 0),
flag=wx.ALL, border=5)
sizer.Add(self.ctrlWidth, pos=(0, 1),
flag=wx.ALL, border=5)
sizer.Add(textHeight, pos=(1, 0),
flag=wx.ALL, border=5)
sizer.Add(self.ctrlHeight, pos=(1, 1),
flag=wx.ALL, border=5)
sizer.Add(textDpi, pos=(2, 0),
flag=wx.ALL, border=5)
sizer.Add(self.spinDpi, pos=(2, 1),
flag=wx.ALL, border=5)
sizer.Add(textSize, pos=(3, 0),
flag=wx.ALL, border=5)
sizer.Add(self.textSize, pos=(3, 1),
flag=wx.ALL, border=5)
sizer.Add(sizerButtons, pos=(4, 0), span=(1, 2),
flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
sizer.SetEmptyCellSize((0, 0))
if onlyDpi:
textWidth.Hide()
self.ctrlWidth.Hide()
textHeight.Hide()
self.ctrlHeight.Hide()
textSize.Hide()
self.textSize.Hide()
self.SetSizerAndFit(sizer)
def __update_size(self, _event):
width = self.ctrlWidth.GetValue()
height = self.ctrlHeight.GetValue()
dpi = self.spinDpi.GetValue()
self.textSize.SetLabel('{:.0f}px x {:.0f}px'.format(width * dpi,
height * dpi))
def __on_ok(self, _event):
self.settings.exportWidth = self.ctrlWidth.GetValue()
self.settings.exportHeight = self.ctrlHeight.GetValue()
self.settings.exportDpi = self.spinDpi.GetValue()
self.EndModal(wx.ID_OK)
class DialogExportSeq(wx.Dialog):
POLL = 250
def __init__(self, parent, spectrum, settings):
self.spectrum = spectrum
self.settings = settings
self.sweeps = None
self.isExporting = False
wx.Dialog.__init__(self, parent=parent, title='Export Plot Sequence')
self.queue = Queue.Queue()
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.__on_timer, self.timer)
self.timer.Start(self.POLL)
self.figure = matplotlib.figure.Figure(facecolor='white')
self.canvas = FigureCanvas(self, -1, self.figure)
self.plot = Plotter(self.queue, self.figure, settings)
textPlot = wx.StaticText(self, label='Plot')
self.checkAxes = wx.CheckBox(self, label='Axes')
self.checkAxes.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.__on_axes, self.checkAxes)
self.checkGrid = wx.CheckBox(self, label='Grid')
self.checkGrid.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.__on_grid, self.checkGrid)
self.checkBar = wx.CheckBox(self, label='Bar')
self.checkBar.SetValue(True)
self.Bind(wx.EVT_CHECKBOX, self.__on_bar, self.checkBar)
sizerCheck = wx.BoxSizer(wx.HORIZONTAL)
sizerCheck.Add(self.checkAxes, flag=wx.ALL, border=5)
sizerCheck.Add(self.checkGrid, flag=wx.ALL, border=5)
sizerCheck.Add(self.checkBar, flag=wx.ALL, border=5)
textRange = wx.StaticText(self, label='Range')
self.sweepTimeStamps = sorted([timeStamp for timeStamp in spectrum.keys()])
sweepChoices = [format_time(timeStamp, True) for timeStamp in self.sweepTimeStamps]
textStart = wx.StaticText(self, label="Start")
self.choiceStart = wx.Choice(self, choices=sweepChoices)
self.choiceStart.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.__on_choice, self.choiceStart)
textEnd = wx.StaticText(self, label="End")
self.choiceEnd = wx.Choice(self, choices=sweepChoices)
self.choiceEnd.SetSelection(len(self.sweepTimeStamps) - 1)
self.Bind(wx.EVT_CHOICE, self.__on_choice, self.choiceEnd)
textSweeps = wx.StaticText(self, label='Sweeps')
self.textSweeps = wx.StaticText(self, label="")
textOutput = wx.StaticText(self, label='Output')
self.textSize = wx.StaticText(self)
buttonSize = wx.Button(self, label='Change...')
buttonSize.SetToolTipString('Change exported image size')
self.Bind(wx.EVT_BUTTON, self.__on_imagesize, buttonSize)
self.__show_image_size()
buttonBrowse = wx.Button(self, label='Browse...')
self.Bind(wx.EVT_BUTTON, self.__on_browse, buttonBrowse)
self.editDir = wx.TextCtrl(self)
self.editDir.SetValue(settings.dirExport)
font = textPlot.GetFont()
fontSize = font.GetPointSize()
font.SetPointSize(fontSize + 4)
textPlot.SetFont(font)
textRange.SetFont(font)
textOutput.SetFont(font)
sizerButtons = wx.StdDialogButtonSizer()
buttonOk = wx.Button(self, wx.ID_OK)
buttonCancel = wx.Button(self, wx.ID_CANCEL)
sizerButtons.AddButton(buttonOk)
sizerButtons.AddButton(buttonCancel)
sizerButtons.Realize()
self.Bind(wx.EVT_BUTTON, self.__on_ok, buttonOk)
sizerGrid = wx.GridBagSizer(5, 5)
sizerGrid.Add(self.canvas, pos=(0, 0), span=(10, 6),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(textPlot, pos=(0, 7),
flag=wx.TOP | wx.BOTTOM, border=5)
sizerGrid.Add(sizerCheck, pos=(1, 7), span=(1, 2),
flag=wx.ALL, border=5)
sizerGrid.Add(textRange, pos=(2, 7),
flag=wx.TOP | wx.BOTTOM, border=5)
sizerGrid.Add(textStart, pos=(3, 7),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.ALL, border=5)
sizerGrid.Add(self.choiceStart, pos=(3, 8),
flag=wx.ALL, border=5)
sizerGrid.Add(textEnd, pos=(4, 7),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.ALL, border=5)
sizerGrid.Add(self.choiceEnd, pos=(4, 8),
flag=wx.ALL, border=5)
sizerGrid.Add(textSweeps, pos=(5, 7),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.ALL, border=5)
sizerGrid.Add(self.textSweeps, pos=(5, 8),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.ALL, border=5)
sizerGrid.Add(textOutput, pos=(6, 7),
flag=wx.TOP | wx.BOTTOM, border=5)
sizerGrid.Add(self.textSize, pos=(7, 7),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.ALL, border=5)
sizerGrid.Add(buttonSize, pos=(7, 8),
flag=wx.ALL, border=5)
sizerGrid.Add(self.editDir, pos=(8, 7), span=(1, 2),
flag=wx.ALL | wx.EXPAND, border=5)
sizerGrid.Add(buttonBrowse, pos=(9, 7),
flag=wx.ALL, border=5)
sizerGrid.Add(sizerButtons, pos=(10, 7), span=(1, 2),
flag=wx.ALIGN_RIGHT | wx.ALL, border=5)
self.SetSizerAndFit(sizerGrid)
self.__draw_plot()
def __on_choice(self, event):
start = self.choiceStart.GetSelection()
end = self.choiceEnd.GetSelection()
control = event.GetEventObject()
if start > end:
if control == self.choiceStart:
self.choiceStart.SetSelection(end)
else:
self.choiceEnd.SetSelection(start)
self.__draw_plot()
def __on_axes(self, _event):
self.plot.set_axes(self.checkAxes.GetValue())
self.__draw_plot()
def __on_grid(self, _event):
self.plot.set_grid(self.checkGrid.GetValue())
self.__draw_plot()
def __on_bar(self, _event):
self.plot.set_bar(self.checkBar.GetValue())
self.__draw_plot()
def __on_imagesize(self, _event):
dlg = DialogImageSize(self, self.settings)
dlg.ShowModal()
self.__show_image_size()
def __on_browse(self, _event):
directory = self.editDir.GetValue()
dlg = wx.DirDialog(self, 'Output directory', directory)
if dlg.ShowModal() == wx.ID_OK:
directory = dlg.GetPath()
self.editDir.SetValue(directory)
def __on_timer(self, _event):
self.timer.Stop()
if not self.isExporting:
while not self.queue.empty():
event = self.queue.get()
status = event.data.get_status()
if status == Event.DRAW:
self.canvas.draw()
self.timer.Start(self.POLL)
def __on_ok(self, _event):
self.isExporting = True
extent = Extent(self.spectrum)
dlgProgress = wx.ProgressDialog('Exporting', '', len(self.sweeps) - 1,
style=wx.PD_AUTO_HIDE |
wx.PD_CAN_ABORT |
wx.PD_REMAINING_TIME)
try:
count = 1
for timeStamp, sweep in self.sweeps.items():
name = '{0:.0f}.png'.format(timeStamp)
directory = self.editDir.GetValue()
thread = self.plot.set_plot({timeStamp: sweep}, extent, False)
thread.join()
filename = os.path.join(directory, '{}.png'.format(timeStamp))
export_image(filename, File.ImageType.PNG,
self.figure,
self.settings)
cont, _skip = dlgProgress.Update(count, name)
if not cont:
break
count += 1
except IOError as error:
wx.MessageBox(error.strerror, 'Error', wx.OK | wx.ICON_WARNING)
finally:
dlgProgress.Destroy()
self.EndModal(wx.ID_OK)
def __spectrum_range(self, start, end):
sweeps = {}
for timeStamp, sweep in self.spectrum.items():
if start <= timeStamp <= end:
sweeps[timeStamp] = sweep
self.sweeps = sort_spectrum(sweeps)
def __draw_plot(self):
start, end = self.__get_range()
self.__spectrum_range(start, end)
self.textSweeps.SetLabel(str(len(self.sweeps)))
if len(self.sweeps) > 0:
total = count_points(self.sweeps)
if total > 0:
extent = Extent(self.spectrum)
self.plot.set_plot(self.sweeps, extent, False)
else:
self.plot.clear_plots()
def __show_image_size(self):
self.textSize.SetLabel('{}" x {}" @ {}dpi'.format(self.settings.exportWidth,
self.settings.exportHeight,
self.settings.exportDpi))
def __get_range(self):
start = self.sweepTimeStamps[self.choiceStart.GetSelection()]
end = self.sweepTimeStamps[self.choiceEnd.GetSelection()]
return start, end
class DialogExportGeo(wx.Dialog):
IMAGE_SIZE = 500
def __init__(self, parent, spectrum, location, settings):
self.spectrum = spectrum
self.location = location
self.settings = settings
self.directory = settings.dirExport
self.colourMap = settings.colourMap
self.colourHeat = settings.colourMap
self.canvas = None
self.extent = None
self.xyz = None
self.plotAxes = False
self.plotMesh = True
self.plotCont = True
self.plotPoint = False
self.plotHeat = False
self.plot = None
wx.Dialog.__init__(self, parent=parent, title='Export Map')
colours = get_colours()
freqMin = min(spectrum[min(spectrum)]) * 1000
freqMax = max(spectrum[min(spectrum)]) * 1000
bw = freqMax - freqMin
self.figure = matplotlib.figure.Figure(facecolor='white')
self.figure.set_size_inches((6, 6))
self.canvas = FigureCanvas(self, -1, self.figure)
self.axes = self.figure.add_subplot(111)
if matplotlib.__version__ >= '1.2':
self.figure.tight_layout()
self.figure.subplots_adjust(left=0, right=1, top=1, bottom=0)
textPlot = wx.StaticText(self, label='Plot')
self.checkAxes = wx.CheckBox(self, label='Axes')
self.checkAxes.SetValue(self.plotAxes)
self.Bind(wx.EVT_CHECKBOX, self.__on_axes, self.checkAxes)
self.checkCont = wx.CheckBox(self, label='Contour lines')
self.checkCont.SetValue(self.plotCont)
self.Bind(wx.EVT_CHECKBOX, self.__on_cont, self.checkCont)
self.checkPoint = wx.CheckBox(self, label='Locations')
self.checkPoint.SetValue(self.plotPoint)
self.Bind(wx.EVT_CHECKBOX, self.__on_point, self.checkPoint)
sizerPlotCheck = wx.BoxSizer(wx.HORIZONTAL)
sizerPlotCheck.Add(self.checkAxes, flag=wx.ALL, border=5)
sizerPlotCheck.Add(self.checkCont, flag=wx.ALL, border=5)
sizerPlotCheck.Add(self.checkPoint, flag=wx.ALL, border=5)
sizerPlot = wx.BoxSizer(wx.VERTICAL)
sizerPlot.Add(textPlot, flag=wx.ALL, border=5)
sizerPlot.Add(sizerPlotCheck, flag=wx.ALL, border=5)
textMesh = wx.StaticText(self, label='Mesh')
self.checkMesh = wx.CheckBox(self, label='On')
self.checkMesh.SetToolTipString('Signal level mesh')
self.checkMesh.SetValue(self.plotMesh)
self.Bind(wx.EVT_CHECKBOX, self.__on_mesh, self.checkMesh)
self.choiceMapMesh = wx.Choice(self, choices=colours)
self.choiceMapMesh.SetSelection(colours.index(self.colourMap))
self.Bind(wx.EVT_CHOICE, self.__on_colour_mesh, self.choiceMapMesh)
self.barMesh = PanelColourBar(self, self.colourMap)
sizerMapMesh = wx.BoxSizer(wx.HORIZONTAL)
sizerMapMesh.Add(self.choiceMapMesh, flag=wx.ALL, border=5)
sizerMapMesh.Add(self.barMesh, flag=wx.ALL, border=5)
sizerMesh = wx.BoxSizer(wx.VERTICAL)
sizerMesh.Add(textMesh, flag=wx.ALL, border=5)
sizerMesh.Add(self.checkMesh, flag=wx.ALL, border=5)
sizerMesh.Add(sizerMapMesh, flag=wx.ALL, border=5)
colours = get_colours()
textHeat = wx.StaticText(self, label='Heat map')
self.checkHeat = wx.CheckBox(self, label='On')
self.checkHeat.SetToolTipString('GPS location heatmap')
self.checkHeat.SetValue(self.plotHeat)
self.Bind(wx.EVT_CHECKBOX, self.__on_heat, self.checkHeat)
self.choiceMapHeat = wx.Choice(self, choices=colours)
self.choiceMapHeat.SetSelection(colours.index(self.colourHeat))
self.Bind(wx.EVT_CHOICE, self.__on_colour_heat, self.choiceMapHeat)
self.barHeat = PanelColourBar(self, self.colourHeat)
sizerMapHeat = wx.BoxSizer(wx.HORIZONTAL)
sizerMapHeat.Add(self.choiceMapHeat, flag=wx.ALL, border=5)
sizerMapHeat.Add(self.barHeat, flag=wx.ALL, border=5)
sizerHeat = wx.BoxSizer(wx.VERTICAL)
sizerHeat.Add(textHeat, flag=wx.ALL, border=5)
sizerHeat.Add(self.checkHeat, flag=wx.ALL, border=5)
sizerHeat.Add(sizerMapHeat, flag=wx.ALL, border=5)
textRange = wx.StaticText(self, label='Range')
textCentre = wx.StaticText(self, label='Centre')
self.spinCentre = wx.SpinCtrl(self)
self.spinCentre.SetToolTipString('Centre frequency (kHz)')
self.spinCentre.SetRange(freqMin, freqMax)
self.spinCentre.SetValue(freqMin + bw / 2)
sizerCentre = wx.BoxSizer(wx.HORIZONTAL)
sizerCentre.Add(textCentre, flag=wx.ALL, border=5)
sizerCentre.Add(self.spinCentre, flag=wx.ALL, border=5)
textBw = wx.StaticText(self, label='Bandwidth')
self.spinBw = wx.SpinCtrl(self)
self.spinBw.SetToolTipString('Bandwidth (kHz)')
self.spinBw.SetRange(1, bw)
self.spinBw.SetValue(bw / 10)
sizerBw = wx.BoxSizer(wx.HORIZONTAL)
sizerBw.Add(textBw, flag=wx.ALL, border=5)
sizerBw.Add(self.spinBw, flag=wx.ALL, border=5)
buttonUpdate = wx.Button(self, label='Update')
self.Bind(wx.EVT_BUTTON, self.__on_update, buttonUpdate)
sizerRange = wx.BoxSizer(wx.VERTICAL)
sizerRange.Add(textRange, flag=wx.ALL, border=5)
sizerRange.Add(sizerCentre, flag=wx.ALL, border=5)
sizerRange.Add(sizerBw, flag=wx.ALL, border=5)
sizerRange.Add(buttonUpdate, flag=wx.ALL, border=5)
textOutput = wx.StaticText(self, label='Output')
self.textRes = wx.StaticText(self)
buttonRes = wx.Button(self, label='Change...')
buttonRes.SetToolTipString('Change output resolution')
self.Bind(wx.EVT_BUTTON, self.__on_imageres, buttonRes)
sizerRes = wx.BoxSizer(wx.HORIZONTAL)
sizerRes.Add(self.textRes, flag=wx.ALL, border=5)
sizerRes.Add(buttonRes, flag=wx.ALL, border=5)
sizerOutput = wx.BoxSizer(wx.VERTICAL)
sizerOutput.Add(textOutput, flag=wx.ALL, border=5)
sizerOutput.Add(sizerRes, flag=wx.ALL, border=5)
self.__show_image_res()
font = textPlot.GetFont()
fontSize = font.GetPointSize()
font.SetPointSize(fontSize + 4)
textPlot.SetFont(font)
textMesh.SetFont(font)
textHeat.SetFont(font)
textRange.SetFont(font)
textOutput.SetFont(font)
sizerButtons = wx.StdDialogButtonSizer()
buttonOk = wx.Button(self, wx.ID_OK)
buttonCancel = wx.Button(self, wx.ID_CANCEL)
sizerButtons.AddButton(buttonOk)
sizerButtons.AddButton(buttonCancel)
sizerButtons.Realize()
self.Bind(wx.EVT_BUTTON, self.__on_ok, buttonOk)
self.__setup_plot()
sizerGrid = wx.GridBagSizer(5, 5)
sizerGrid.Add(self.canvas, pos=(0, 0), span=(5, 6),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerPlot, pos=(0, 7),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerMesh, pos=(1, 7),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerHeat, pos=(2, 7),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerRange, pos=(3, 7),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerOutput, pos=(4, 7),
flag=wx.EXPAND | wx.ALL, border=5)
sizerGrid.Add(sizerButtons, pos=(5, 7), span=(1, 2),
flag=wx.ALIGN_RIGHT | wx.ALL, border=5)
self.SetSizerAndFit(sizerGrid)
self.__draw_plot()
def __setup_plot(self):
self.axes.clear()
self.choiceMapMesh.Enable(self.plotMesh)
self.choiceMapHeat.Enable(self.plotHeat)
self.axes.set_xlabel('Longitude ($^\circ$)')
self.axes.set_ylabel('Latitude ($^\circ$)')
self.axes.set_xlim(auto=True)
self.axes.set_ylim(auto=True)
formatter = ScalarFormatter(useOffset=False)
self.axes.xaxis.set_major_formatter(formatter)
self.axes.yaxis.set_major_formatter(formatter)
def __draw_plot(self):
x = []
y = []
z = []
freqCentre = self.spinCentre.GetValue()
freqBw = self.spinBw.GetValue()
freqMin = (freqCentre - freqBw) / 1000.
freqMax = (freqCentre + freqBw) / 1000.
for timeStamp in self.spectrum:
spectrum = self.spectrum[timeStamp]
sweep = [yv for xv, yv in spectrum.items() if freqMin <= xv <= freqMax]
if len(sweep):
peak = max(sweep)
try:
location = self.location[timeStamp]
except KeyError:
continue
x.append(location[1])
y.append(location[0])
z.append(peak)
if len(x) == 0:
self.__draw_warning()
return
self.extent = (min(x), max(x), min(y), max(y))
self.xyz = (x, y, z)
xi = numpy.linspace(min(x), max(x), self.IMAGE_SIZE)
yi = numpy.linspace(min(y), max(y), self.IMAGE_SIZE)
if self.plotMesh or self.plotCont:
try:
zi = mlab.griddata(x, y, z, xi, yi)
except:
self.__draw_warning()
return
if self.plotMesh:
self.plot = self.axes.pcolormesh(xi, yi, zi, cmap=self.colourMap)
self.plot.set_zorder(1)
if self.plotCont:
contours = self.axes.contour(xi, yi, zi, linewidths=0.5,
colors='k')
self.axes.clabel(contours, inline=1, fontsize='x-small',
gid='clabel', zorder=3)
if self.plotHeat:
image = create_heatmap(x, y,
self.IMAGE_SIZE, self.IMAGE_SIZE / 10,
self.colourHeat)
heatMap = self.axes.imshow(image, extent=self.extent)
heatMap.set_zorder(2)
if self.plotPoint:
self.axes.plot(x, y, 'wo')
for posX, posY, posZ in zip(x, y, z):
points = self.axes.annotate('{0:.2f}dB'.format(posZ), xy=(posX, posY),
xytext=(-5, 5), ha='right',
textcoords='offset points')
points.set_zorder(3)
if matplotlib.__version__ >= '1.3':
effect = patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)
for child in self.axes.get_children():
child.set_path_effects([effect])
if self.plotAxes:
self.axes.set_axis_on()
else:
self.axes.set_axis_off()
self.canvas.draw()
def __draw_warning(self):
self.axes.text(0.5, 0.5, 'Insufficient GPS data',
ha='center', va='center',
transform=self.axes.transAxes)
def __on_update(self, _event):
self.__setup_plot()
self.__draw_plot()
def __on_imageres(self, _event):
dlg = DialogImageSize(self, self.settings, True)
dlg.ShowModal()
self.__show_image_res()
def __on_ok(self, _event):
self.EndModal(wx.ID_OK)
def __on_axes(self, _event):
self.plotAxes = self.checkAxes.GetValue()
if self.plotAxes:
self.axes.set_axis_on()
else:
self.axes.set_axis_off()
self.canvas.draw()
def __on_mesh(self, _event):
self.plotMesh = self.checkMesh.GetValue()
self.__on_update(None)
def __on_cont(self, _event):
self.plotCont = self.checkCont.GetValue()
self.__on_update(None)
def __on_point(self, _event):
self.plotPoint = self.checkPoint.GetValue()
self.__on_update(None)
def __on_heat(self, _event):
self.plotHeat = self.checkHeat.GetValue()
self.__on_update(None)
    def __on_colour_mesh(self, _event):
        # use self.colourMap so the choice persists across __draw_plot calls,
        # which read self.colourMap for the mesh colour map
        self.colourMap = self.choiceMapMesh.GetStringSelection()
        self.barMesh.set_map(self.colourMap)
        if self.plot:
            self.plot.set_cmap(self.colourMap)
        self.canvas.draw()
def __on_colour_heat(self, _event):
self.colourHeat = self.choiceMapHeat.GetStringSelection()
self.barHeat.set_map(self.colourHeat)
self.__on_update(None)
def __show_image_res(self):
self.textRes.SetLabel('{}dpi'.format(self.settings.exportDpi))
def get_filename(self):
return self.filename
def get_directory(self):
return self.directory
def get_extent(self):
return self.extent
def get_image(self):
width = self.extent[1] - self.extent[0]
height = self.extent[3] - self.extent[2]
self.figure.set_size_inches((6, 6. * width / height))
self.figure.set_dpi(self.settings.exportDpi)
self.figure.patch.set_alpha(0)
self.axes.axesPatch.set_alpha(0)
canvas = FigureCanvasAgg(self.figure)
canvas.draw()
renderer = canvas.get_renderer()
if matplotlib.__version__ >= '1.2':
buf = renderer.buffer_rgba()
else:
buf = renderer.buffer_rgba(0, 0)
size = canvas.get_width_height()
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
return image
def get_xyz(self):
return self.xyz
class DialogSaveWarn(wx.Dialog):
def __init__(self, parent, warnType):
self.code = -1
wx.Dialog.__init__(self, parent=parent, title="Warning")
prompt = ["scanning again", "opening a file",
"exiting", "clearing", "merging"][warnType]
text = wx.StaticText(self,
label="Save plot_line before {}?".format(prompt))
icon = wx.StaticBitmap(self, wx.ID_ANY,
wx.ArtProvider.GetBitmap(wx.ART_INFORMATION,
wx.ART_MESSAGE_BOX))
tbox = wx.BoxSizer(wx.HORIZONTAL)
tbox.Add(text)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(icon, 0, wx.ALL, 5)
hbox.Add(tbox, 0, wx.ALL, 5)
buttonYes = wx.Button(self, wx.ID_YES, 'Yes')
buttonNo = wx.Button(self, wx.ID_NO, 'No')
buttonCancel = wx.Button(self, wx.ID_CANCEL, 'Cancel')
buttonYes.Bind(wx.EVT_BUTTON, self.__on_close)
buttonNo.Bind(wx.EVT_BUTTON, self.__on_close)
buttons = wx.StdDialogButtonSizer()
buttons.AddButton(buttonYes)
buttons.AddButton(buttonNo)
buttons.AddButton(buttonCancel)
buttons.Realize()
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox, 1, wx.ALL | wx.EXPAND, 10)
vbox.Add(buttons, 1, wx.ALL | wx.EXPAND, 10)
self.SetSizerAndFit(vbox)
def __on_close(self, event):
self.EndModal(event.GetId())
return
def get_code(self):
return self.code
class DialogRestore(wx.Dialog):
COL_SEL, COL_TIME, COL_SIZE = range(3)
def __init__(self, parent, backups):
self.selected = 0
self.backups = backups
self.restored = None
wx.Dialog.__init__(self, parent=parent, title='Restore backups')
self.grid = grid.Grid(self)
self.grid.CreateGrid(1, 3)
self.grid.SetRowLabelSize(0)
self.grid.SetColLabelValue(self.COL_SEL, 'Selected')
self.grid.SetColLabelValue(self.COL_TIME, 'Time')
self.grid.SetColLabelValue(self.COL_SIZE, 'Size (k)')
self.grid.SetColFormatFloat(self.COL_SIZE, -1, 1)
self.__set_grid()
self.Bind(grid.EVT_GRID_CELL_LEFT_CLICK, self.__on_click)
buttonRest = wx.Button(self, wx.ID_OPEN, 'Restore')
buttonDel = wx.Button(self, wx.ID_DELETE, 'Delete')
buttonCancel = wx.Button(self, wx.ID_CANCEL, 'Close')
buttonRest.Bind(wx.EVT_BUTTON, self.__on_restore)
buttonDel.Bind(wx.EVT_BUTTON, self.__on_delete)
sizerButtons = wx.BoxSizer(wx.HORIZONTAL)
sizerButtons.Add(buttonRest)
sizerButtons.Add(buttonDel)
sizerButtons.Add(buttonCancel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.grid, flag=wx.ALL | wx.EXPAND, border=5)
sizer.Add(sizerButtons, flag=wx.ALL, border=5)
self.SetSizerAndFit(sizer)
def __set_grid(self):
self.grid.DeleteRows(0, self.grid.GetNumberRows())
self.grid.AppendRows(len(self.backups.backups))
i = 0
for backup in self.backups.backups:
self.grid.SetCellRenderer(i, self.COL_SEL, TickCellRenderer())
self.grid.SetCellRenderer(i, self.COL_TIME,
GridCellDateTimeRenderer())
self.grid.SetReadOnly(i, self.COL_TIME, True)
self.grid.SetReadOnly(i, self.COL_SIZE, True)
self.grid.SetCellValue(i, self.COL_TIME,
str(backup[1].replace(microsecond=0)))
self.grid.SetCellValue(i, self.COL_SIZE, str(backup[2]))
i += 1
self.__select_row(0)
self.grid.AutoSize()
def __on_click(self, event):
col = event.GetCol()
if col == self.COL_SEL:
row = event.GetRow()
self.selected = row
self.__select_row(row)
def __on_restore(self, event):
try:
self.restored = self.backups.load(self.selected)
except (cPickle.UnpicklingError, AttributeError,
EOFError, ImportError, IndexError, ValueError):
wx.MessageBox('The file could not be restored', 'Restore failed',
wx.OK | wx.ICON_ERROR)
return
self.EndModal(event.GetId())
def __on_delete(self, _event):
dlg = wx.MessageDialog(self, 'Delete the selected backup?',
'Delete backup',
wx.OK | wx.CANCEL | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_OK:
self.backups.delete(self.selected)
self.__set_grid()
def __select_row(self, index):
self.grid.ClearSelection()
for i in range(0, self.grid.GetNumberRows()):
tick = "0"
if i == index:
tick = "1"
self.grid.SetCellValue(i, self.COL_SEL, tick)
def get_restored(self):
return self.restored
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
kjung/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
SuLab/scheduled-bots | scheduled_bots/geneprotein/MicrobialChromosomeBot.py | 1 | 7441 | import subprocess
from datetime import datetime
import pandas as pd
from scheduled_bots import get_default_core_props
from scheduled_bots.geneprotein import PROPS
from wikidataintegrator import wdi_core, wdi_helpers
from wikidataintegrator.wdi_helpers import prop2qid
core_props = get_default_core_props()
class MicrobialChromosomeBot:
chr_type_map = {'chromosome': 'Q37748',
'mitochondrion': 'Q18694495',
'chloroplast': 'Q22329079',
'plasmid': 'Q172778',
'circular': 'Q5121654'}
def __init__(self):
self.df = pd.DataFrame()
def get_microbial_ref_genome_table(self):
"""
# Download and parse Microbial Reference and representative genomes table
# https://www.ncbi.nlm.nih.gov/genome/browse/reference/
# but oh wait, it has no useful fields that we need (taxid, accession, chromosome)
url = "https://www.ncbi.nlm.nih.gov/genomes/Genome2BE/genome2srv.cgi?action=refgenomes&download=on&type=reference"
df_ref = pd.read_csv(url, sep="\t", dtype=object, header=0)
"""
# get the assembly accessions of the reference genomes
url = "ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/bacteria/assembly_summary.txt"
subprocess.check_output(['wget', '-N', url])
assdf = pd.read_csv("assembly_summary.txt", sep="\t", dtype=object, header=0, skiprows=1)
assdf = assdf.query("refseq_category == 'reference genome'")
accessions = set(assdf.gbrs_paired_asm)
# adding Chlamydia muridarum str. Nigg
accessions.add("GCA_000006685.1")
accessions.add("GCA_000012685.1")
# Download prokaryotes genome table
# but oh wait, it has no ref genome column
url = "ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/prokaryotes.txt"
subprocess.check_output(['wget', '-N', url])
df = pd.read_csv("prokaryotes.txt", sep="\t", dtype=object, header=0, error_bad_lines=False)
df = df[df['Assembly Accession'].isin(accessions)]
df = df.rename(columns={df.columns[0]: df.columns[0][1:]})
# columns = ['Organism/Name', 'TaxID', 'BioProject Accession', 'BioProject ID', 'Group', 'SubGroup', 'Size (Mb)',
# 'GC%', 'Replicons', 'WGS', 'Scaffolds', 'Genes', 'Proteins', 'Release Date',
# 'Modify Date', 'Status', 'Center', 'BioSample Accession', 'Assembly Accession', 'Reference',
# 'FTP Path', 'Pubmed ID', 'Strain']
print("Found {} reference genomes".format(len(df)))
assert len(set(df.TaxID)) == len(df)
assert 110 < len(df) < 140
self.df = df
def get_chromosome_info(self, taxid):
# replicons column looks like:
# 'chromosome circular:NC_003062.2/AE007869.2; chromosome linear:NC_003063.2/AE007870.2; plasmid At:NC_003064.2/AE007872.2; plasmid Ti:NC_003065.3/AE007871.2'
replicons = self.df.loc[self.df.TaxID == taxid, 'Replicons'].values[0]
chroms = [{'name': x.split(":")[0].strip(),
'refseq': x.split(":")[1].split("/")[0].strip()} for x in replicons.split(";")]
return chroms
def get_or_create_chromosomes(self, taxid, login=None):
# main function to use to get or create all of the chromosomes for a bacterial organism
# returns dict with key = refseq ID, value = qid for chromosome item
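        # Illustrative usage (the taxid and login below are assumptions, not
        # taken from this file):
        #   bot = MicrobialChromosomeBot()
        #   chr_map = bot.get_or_create_chromosomes("83332", login=wdi_login)
        #   # -> e.g. {'NC_000962.3': 'Q...'}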
if self.df.empty:
self.get_microbial_ref_genome_table()
df = self.df
taxid = str(taxid)
entry = df[df.TaxID == taxid].to_dict("records")[0]
organism_name = entry['Organism/Name']
organism_qid = prop2qid(PROPS['NCBI Taxonomy ID'], taxid)
chroms = self.get_chromosome_info(taxid)
chr_map = dict()
chr_name_type = {'chromosome circular': 'circular',
'chromosome linear': 'chromosome',
'chromosome': 'chromosome'}
for chrom in chroms:
chrom_name = chrom['name'].lower()
genome_id = chrom['refseq']
if chrom_name in chr_name_type:
chr_type = chr_name_type[chrom_name]
elif "plasmid" in chrom_name:
chr_type = 'plasmid'
else:
raise ValueError("unknown chromosome type: {}".format(chrom['name']))
qid = self.create_chrom(organism_name, organism_qid, chrom_name, genome_id, chr_type, login=login)
chr_map[chrom['refseq']] = qid
return chr_map
def create_chrom(self, organism_name, organism_qid, chrom_name, genome_id, chr_type, login):
def make_ref(retrieved, genome_id):
"""
Create reference statement for chromosomes
:param retrieved: datetime
:type retrieved: datetime
:param genome_id: refseq genome id
:type genome_id: str
:return:
"""
refs = [
wdi_core.WDItemID(value='Q20641742', prop_nr='P248', is_reference=True), # stated in ncbi gene
wdi_core.WDString(value=genome_id, prop_nr='P2249', is_reference=True), # Link to Refseq Genome ID
wdi_core.WDTime(retrieved.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True)
]
return refs
item_name = '{} {}'.format(organism_name, chrom_name)
item_description = 'bacterial {}'.format(chr_type)
print(genome_id)
retrieved = datetime.now()
reference = make_ref(retrieved, genome_id)
# instance of chr_type
chr_type = chr_type.lower()
if chr_type not in self.chr_type_map:
raise ValueError("unknown chromosome type: {}".format(chr_type))
statements = [wdi_core.WDItemID(value=self.chr_type_map[chr_type], prop_nr='P31', references=[reference])]
# found in taxon
statements.append(wdi_core.WDItemID(value=organism_qid, prop_nr='P703', references=[reference]))
# genome id
statements.append(wdi_core.WDString(value=genome_id, prop_nr='P2249', references=[reference]))
wd_item = wdi_core.WDItemEngine(data=statements,
append_value=['P31'], fast_run=True,
fast_run_base_filter={'P703': organism_qid, 'P2249': ''},
core_props=core_props)
if wd_item.wd_item_id:
return wd_item.wd_item_id
if login is None:
raise ValueError("Login is required to create item")
wd_item.set_label(item_name)
wd_item.set_description(item_description, lang='en')
wdi_helpers.try_write(wd_item, genome_id, 'P2249', login)
return wd_item.wd_item_id
def get_all_taxids(self):
if self.df.empty:
self.get_microbial_ref_genome_table()
return set(self.df.TaxID)
def get_organism_info(self, taxid):
taxid = str(taxid)
if taxid not in self.get_all_taxids():
raise ValueError("taxid {} not found in microbe ref genomes".format(taxid))
entry = self.df[self.df.TaxID == taxid].to_dict("records")[0]
qid = prop2qid(PROPS['NCBI Taxonomy ID'], taxid)
return {'name': entry['Organism/Name'],
'type': "microbial",
'wdid': qid,
'qid': qid,
'taxid': taxid}
| mit |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/frames/boxcox_test.py | 12 | 5074 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test frame.box_cox() and frame.reverse_box_cox()"""
import unittest
from sparktkregtests.lib import sparktk_test
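# The expected values below are consistent with the standard one-parameter
# Box-Cox transform (an assumption about what frame.box_cox computes, stated
# here only to make the fixtures easier to audit):
#     y' = (y**lmbda - 1) / lmbda   if lmbda != 0
#     y' = log(y)                   if lmbda == 0
# and its inverse
#     y  = (lmbda * y' + 1) ** (1.0 / lmbda)   if lmbda != 0
#     y  = exp(y')                             if lmbda == 0
# e.g. log(5.88130801...) ~= 1.77177918..., matching test_wt_default.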
class BoxCoxTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(BoxCoxTest, self).setUp()
dataset =\
[[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],
[8.1583747730768401], [0.35889585616853292]]
schema = [("y", float)]
self.frame = self.context.frame.create(dataset, schema=schema)
def test_wt_default(self):
""" Test behaviour for default params, lambda = 0 """
self.frame.box_cox("y")
actual = self.frame.to_pandas()["y_lambda_0.0"].tolist()
expected =\
[1.7717791879837133, 2.1946810429706676,
2.1877697201262163, 2.0990449791729704, -1.0247230268174008]
self.assertItemsEqual(actual, expected)
def test_lambda(self):
""" Test wt for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
actual = self.frame.to_pandas()["y_lambda_0.3"].tolist()
expected =\
[2.3384668540844573, 3.1056915770236082,
3.0923547540771801, 2.9235756971904037, -0.88218677941017198]
self.assertItemsEqual(actual, expected)
def test_reverse_default(self):
""" Test reverse transform for default lambda = 0 """
self.frame.box_cox("y")
self.frame.reverse_box_cox("y_lambda_0.0",
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727425, 8.9771372790941815,
8.9153072947470804, 8.1583747730768401, 0.35889585616853298]
self.assertItemsEqual(actual, expected)
def test_reverse_lambda(self):
""" Test reverse transform for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
self.frame.reverse_box_cox("y_lambda_0.3", 0.3,
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727442, 8.9771372790941797,
8.9153072947470822, 8.1583747730768419,
0.35889585616853298]
self.assertItemsEqual(actual, expected)
@unittest.skip("req not clear")
def test_lambda_negative(self):
""" Test box cox for lambda -1 """
self.frame.box_cox("y", -1)
actual = self.frame.to_pandas()["y_lambda_-1.0"].tolist()
expected =\
[0.82996979614597488, 0.88860591423406388,
0.88783336715839256, 0.87742656744575354,
-1.7863236167608822]
self.assertItemsEqual(actual, expected)
def test_existing_boxcox_column(self):
""" Test behavior for existing boxcox column """
self.frame.box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.box_cox("y", 0.3)
def test_existing_reverse_column(self):
""" Test behavior for existing reverse boxcox column """
self.frame.reverse_box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.reverse_box_cox("y", 0.3)
@unittest.skip("Req not clear")
def test_negative_col_positive_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
frame.box_cox("y", 1)
actual = frame.to_pandas()["y_lambda_1.0"].tolist()
expected = [-2.0, -3.0, 0]
self.assertItemsEqual(actual, expected)
@unittest.skip("Req not clear")
    def test_negative_col_fractional_lambda(self):
        """Test behaviour for negative input column and fractional lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y", 0.1)
@unittest.skip("Req not clear")
def test_negative_col_zero_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
AnasGhrab/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
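# MyPassiveAggressive is a minimal NumPy reference implementation of the
# Passive-Aggressive update rules (Crammer et al., 2006) used below to
# cross-check the scikit-learn estimators:
#     PA-I  (hinge / epsilon_insensitive):             step = min(C, loss / ||x||^2)
#     PA-II (squared_hinge / squared_epsilon_insensitive): step = loss / (||x||^2 + 1/(2C))
# The step is then signed by the label (classification) or by sign(y - p)
# (regression) before updating w and, optionally, the intercept b.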
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
ambikeshwar1991/sandhi-2 | module/gr36/gr-controls/python/qa_dsim.py | 7 | 1797 | #!/usr/bin/env python
#
# Copyright 2013 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
#import mymod_swig as mymod
from dsim import dsim
class qa_dsim (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
src_data = [0]*100
src_data1 = [1]*1000
src_data = tuple(src_data+src_data1)
expected_result = (-2.0, 0.0, 5.0, 8.0, 9.0, 11.0, 14.0, 18.0)
src0 = gr.vector_source_f(src_data)
sqr = dsim()
sqr.set_parameters(2,0.5,0.6,1,1, 0.1, 2, 1, 1100)
#Preload
sqr.input_config(1).preload_items = 1
dst = gr.vector_sink_f()
self.tb.connect(src0, (sqr,0)) # src0(vector_source) -> sqr_input_0
self.tb.connect(sqr,dst) # sqr_output_0 -> dst (vector_source)
self.tb.run()
result_data = dst.data()
import matplotlib.pyplot as plt
plt.plot(result_data)
plt.show()
#self.assertFloatTuplesAlmostEqual(expected_result, result_data, 6)
if __name__ == '__main__':
gr_unittest.main()
#gr_unittest.run(qa_dsim, "qa_dsim.xml")
| gpl-3.0 |
pnedunuri/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
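    # "accuracy" is the fraction of the true n_points cosine nearest
    # neighbors recovered by the approximate query, averaged over n_iter
    # random queries drawn from the training set.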
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slighltly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting an array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
nicproulx/mne-python | tutorials/plot_stats_cluster_1samp_test_time_frequency.py | 9 | 4065 | """
.. _tut_stats_cluster_sensor_1samp_tfr:
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- computing single-trial power estimates
- baseline-correcting the power estimates (power ratios)
- computing stats to see if the ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# Take only one channel
ch_name = 'MEG 1332'
epochs.pick_channels([ch_name])
evoked = epochs.average()
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
frequencies = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
tfr_epochs = tfr_morlet(epochs, frequencies, n_cycles=4., decim=decim,
average=False, return_itc=False, n_jobs=1)
# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(0., 0.4)
tfr_epochs.crop(0., 0.4)
epochs_power = tfr_epochs.data[:, 0, :, :] # take the 1 channel
###############################################################################
# Compute statistic
# -----------------
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=100,
threshold=threshold, tail=0)
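# T_obs is the observed t-statistic map (n_freqs x n_times), clusters is a
# list of index sets (boolean masks or index arrays) marking each cluster,
# cluster_p_values holds one permutation p-value per cluster, and H0 is the
# permutation distribution of the maximum cluster-level statistic (summary
# based on the mne-python API; see the function's docstring for details).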
###############################################################################
# View time-frequency plots
# -------------------------
evoked_data = evoked.data
times = 1e3 * evoked.times
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs, cmap=plt.cm.gray,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2])
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/io/parser/test_read_fwf.py | 1 | 21041 | """
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
from io import BytesIO, StringIO
from pathlib import Path
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, DatetimeIndex
import pandas._testing as tm
from pandas.io.parsers import EmptyDataError, read_csv, read_fwf
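# A note on the API exercised below: read_fwf takes either ``colspecs`` (a
# list of (start, stop) half-open intervals) or ``widths`` (field widths from
# which colspecs are derived); passing both raises (test_over_specified),
# explicitly passing colspecs=None together with widths=None raises
# (test_under_specified), and by default column breaks are inferred from the
# first ``infer_nrows`` data rows (test_fwf_colspecs_infer_nrows).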
def test_basic():
data = """\
A B C D
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
result = read_fwf(StringIO(data))
expected = DataFrame(
[
[201158, 360.242940, 149.910199, 11950.7],
[201159, 444.953632, 166.985655, 11788.4],
[201160, 364.136849, 183.628767, 11806.2],
[201161, 413.836124, 184.375703, 11916.8],
[201162, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_colspecs():
data = """\
A B C D E
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
result = read_fwf(StringIO(data), colspecs=colspecs)
expected = DataFrame(
[
[2011, 58, 360.242940, 149.910199, 11950.7],
[2011, 59, 444.953632, 166.985655, 11788.4],
[2011, 60, 364.136849, 183.628767, 11806.2],
[2011, 61, 413.836124, 184.375703, 11916.8],
[2011, 62, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D", "E"],
)
tm.assert_frame_equal(result, expected)
def test_widths():
data = """\
A B C D E
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
result = read_fwf(StringIO(data), widths=[5, 5, 13, 13, 7])
expected = DataFrame(
[
[2011, 58, 360.242940, 149.910199, 11950.7],
[2011, 59, 444.953632, 166.985655, 11788.4],
[2011, 60, 364.136849, 183.628767, 11806.2],
[2011, 61, 413.836124, 184.375703, 11916.8],
[2011, 62, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D", "E"],
)
tm.assert_frame_equal(result, expected)
def test_non_space_filler():
# From Thomas Kluyver:
#
    # Apparently, some files use non-space filler characters; this is
    # supported by specifying the 'delimiter' character:
#
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data = """\
A~~~~B~~~~C~~~~~~~~~~~~D~~~~~~~~~~~~E
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
result = read_fwf(StringIO(data), colspecs=colspecs, delimiter="~")
expected = DataFrame(
[
[2011, 58, 360.242940, 149.910199, 11950.7],
[2011, 59, 444.953632, 166.985655, 11788.4],
[2011, 60, 364.136849, 183.628767, 11806.2],
[2011, 61, 413.836124, 184.375703, 11916.8],
[2011, 62, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D", "E"],
)
tm.assert_frame_equal(result, expected)
def test_over_specified():
data = """\
A B C D E
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
with pytest.raises(ValueError, match="must specify only one of"):
read_fwf(StringIO(data), colspecs=colspecs, widths=[6, 10, 10, 7])
def test_under_specified():
data = """\
A B C D E
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
with pytest.raises(ValueError, match="Must specify either"):
read_fwf(StringIO(data), colspecs=None, widths=None)
def test_read_csv_compat():
csv_data = """\
A,B,C,D,E
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(csv_data), engine="python")
fwf_data = """\
A B C D E
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
result = read_fwf(StringIO(fwf_data), colspecs=colspecs)
tm.assert_frame_equal(result, expected)
def test_bytes_io_input():
result = read_fwf(BytesIO("שלום\nשלום".encode()), widths=[2, 2], encoding="utf8")
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple():
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = "column specifications must be a list or tuple.+"
with pytest.raises(TypeError, match=msg):
read_fwf(StringIO(data), colspecs={"a": 1}, delimiter=",")
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples():
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = "Each column specification must be.+"
with pytest.raises(TypeError, match=msg):
read_fwf(StringIO(data), [("a", 1)])
@pytest.mark.parametrize(
"colspecs,exp_data",
[
([(0, 3), (3, None)], [[123, 456], [456, 789]]),
([(None, 3), (3, 6)], [[123, 456], [456, 789]]),
([(0, None), (3, None)], [[123456, 456], [456789, 789]]),
([(None, None), (3, 6)], [[123456, 456], [456789, 789]]),
],
)
def test_fwf_colspecs_none(colspecs, exp_data):
# see gh-7079
data = """\
123456
456789
"""
expected = DataFrame(exp_data)
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"infer_nrows,exp_data",
[
# infer_nrows --> colspec == [(2, 3), (5, 6)]
(1, [[1, 2], [3, 8]]),
# infer_nrows > number of rows
(10, [[1, 2], [123, 98]]),
],
)
def test_fwf_colspecs_infer_nrows(infer_nrows, exp_data):
# see gh-15138
data = """\
1 2
123 98
"""
expected = DataFrame(exp_data)
result = read_fwf(StringIO(data), infer_nrows=infer_nrows, header=None)
tm.assert_frame_equal(result, expected)
def test_fwf_regression():
# see gh-3594
#
# Turns out "T060" is parsable as a datetime slice!
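    # The first column is %Y%j%H%M%S (%j = day of year), so "2009164202000"
    # parses to 2009-06-13 20:20:00, which is what the expected index checks.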
tz_list = [1, 10, 20, 30, 60, 80, 100]
widths = [16] + [8] * len(tz_list)
names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
result = read_fwf(
StringIO(data),
index_col=0,
header=None,
names=names,
widths=widths,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, "%Y%j%H%M%S"),
)
expected = DataFrame(
[
[9.5403, 9.4105, 8.6571, 7.8372, 6.0612, 5.8843, 5.5192],
[9.5435, 9.2010, 8.6167, 7.8176, 6.0804, 5.8728, 5.4869],
[9.5873, 9.1326, 8.4694, 7.5889, 6.0422, 5.8526, 5.4657],
[9.5810, 9.0896, 8.4009, 7.4652, 6.0322, 5.8189, 5.4379],
[9.6034, 9.0897, 8.3822, 7.4905, 6.0908, 5.7904, 5.4039],
],
index=DatetimeIndex(
[
"2009-06-13 20:20:00",
"2009-06-13 20:30:00",
"2009-06-13 20:40:00",
"2009-06-13 20:50:00",
"2009-06-13 21:00:00",
]
),
columns=["SST", "T010", "T020", "T030", "T060", "T080", "T100"],
)
tm.assert_frame_equal(result, expected)
def test_fwf_for_uint8():
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(
StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)],
names=["time", "pri", "pgn", "dst", "src", "data"],
converters={
"pgn": lambda x: int(x, 16),
"src": lambda x: int(x, 16),
"dst": lambda x: int(x, 16),
"data": lambda x: len(x.split(" ")),
},
)
expected = DataFrame(
[
[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8],
],
columns=["time", "pri", "pgn", "dst", "src", "data"],
)
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("comment", ["#", "~", "!"])
def test_fwf_comment(comment):
data = """\
1 2. 4 #hello world
5 NaN 10.0
"""
data = data.replace("#", comment)
colspecs = [(0, 3), (4, 9), (9, 25)]
expected = DataFrame([[1, 2.0, 4], [5, np.nan, 10.0]])
result = read_fwf(StringIO(data), colspecs=colspecs, header=None, comment=comment)
tm.assert_almost_equal(result, expected)
def test_fwf_skip_blank_lines():
data = """
A B C D
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201162 502.953953 173.237159 12468.3
"""
result = read_fwf(StringIO(data), skip_blank_lines=True)
expected = DataFrame(
[
[201158, 360.242940, 149.910199, 11950.7],
[201159, 444.953632, 166.985655, 11788.4],
[201162, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
data = """\
A B C D
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201162 502.953953 173.237159 12468.3
"""
result = read_fwf(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[
[201158, 360.242940, 149.910199, 11950.7],
[201159, 444.953632, 166.985655, 11788.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[201162, 502.953953, 173.237159, 12468.3],
],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("thousands", [",", "#", "~"])
def test_fwf_thousands(thousands):
data = """\
1 2,334.0 5
10 13 10.
"""
data = data.replace(",", thousands)
colspecs = [(0, 3), (3, 11), (12, 16)]
expected = DataFrame([[1, 2334.0, 5], [10, 13, 10.0]])
result = read_fwf(
StringIO(data), header=None, colspecs=colspecs, thousands=thousands
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(header):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
msg = "Passing a bool to header is invalid"
with pytest.raises(TypeError, match=msg):
read_fwf(StringIO(data), header=header)
def test_full_file():
# File with all values.
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
result = read_fwf(StringIO(test))
tm.assert_frame_equal(result, expected)
def test_full_file_with_missing():
# File with missing values.
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
result = read_fwf(StringIO(test))
tm.assert_frame_equal(result, expected)
def test_full_file_with_spaces():
# File with spaces in columns.
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip(
"\r\n"
)
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
result = read_fwf(StringIO(test))
tm.assert_frame_equal(result, expected)
def test_full_file_with_spaces_and_missing():
# File with spaces and missing values in columns.
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip(
"\r\n"
)
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
result = read_fwf(StringIO(test))
tm.assert_frame_equal(result, expected)
def test_messed_up_data():
# Completely messed up file.
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip(
"\r\n"
)
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
result = read_fwf(StringIO(test))
tm.assert_frame_equal(result, expected)
def test_multiple_delimiters():
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip(
"\r\n"
)
delimiter = " +~.\\"
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs, delimiter=delimiter)
result = read_fwf(StringIO(test), delimiter=delimiter)
tm.assert_frame_equal(result, expected)
def test_variable_width_unicode():
data = """
שלום שלום
ום שלל
של ום
""".strip(
"\r\n"
)
encoding = "utf8"
kwargs = {"header": None, "encoding": encoding}
expected = read_fwf(
BytesIO(data.encode(encoding)), colspecs=[(0, 4), (5, 9)], **kwargs
)
result = read_fwf(BytesIO(data.encode(encoding)), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [{}, {"a": "float64", "b": str, "c": "int32"}])
def test_dtype(dtype):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype)
expected = DataFrame(
{"a": [1, 3], "b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"]
)
for col, dt in dtype.items():
expected[col] = expected[col].astype(dt)
tm.assert_frame_equal(result, expected)
def test_skiprows_inference():
# see gh-11256
data = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
skiprows = 2
expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)
result = read_fwf(StringIO(data), skiprows=skiprows)
tm.assert_frame_equal(result, expected)
def test_skiprows_by_index_inference():
data = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
skiprows = [0, 2]
expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)
result = read_fwf(StringIO(data), skiprows=skiprows)
tm.assert_frame_equal(result, expected)
def test_skiprows_inference_empty():
data = """
AA BBB C
12 345 6
78 901 2
""".strip()
msg = "No rows from which to infer column width"
with pytest.raises(EmptyDataError, match=msg):
read_fwf(StringIO(data), skiprows=3)
def test_whitespace_preservation():
# see gh-16772
header = None
csv_data = """
a ,bbb
cc,dd """
fwf_data = """
a bbb
ccdd """
result = read_fwf(
StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0], delimiter="\n\t"
)
expected = read_csv(StringIO(csv_data), header=header)
tm.assert_frame_equal(result, expected)
def test_default_delimiter():
header = None
csv_data = """
a,bbb
cc,dd"""
fwf_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0])
expected = read_csv(StringIO(csv_data), header=header)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("infer", [True, False])
def test_fwf_compression(compression_only, infer):
data = """1111111111
2222222222
3333333333""".strip()
compression = compression_only
extension = "gz" if compression == "gzip" else compression
kwargs = {"widths": [5, 5], "names": ["one", "two"]}
expected = read_fwf(StringIO(data), **kwargs)
data = bytes(data, encoding="utf-8")
with tm.ensure_clean(filename="tmp." + extension) as path:
tm.write_to_compressed(compression, path, data)
if infer is not None:
kwargs["compression"] = "infer" if infer else compression
result = read_fwf(path, **kwargs)
tm.assert_frame_equal(result, expected)
def test_binary_mode():
"""
read_fwf supports opening files in binary mode.
GH 18035.
"""
data = """aas aas aas
bba bab b a"""
df_reference = DataFrame(
[["bba", "bab", "b a"]], columns=["aas", "aas.1", "aas.2"], index=[0]
)
with tm.ensure_clean() as path:
Path(path).write_text(data)
with open(path, "rb") as file:
df = pd.read_fwf(file)
file.seek(0)
tm.assert_frame_equal(df, df_reference)
@pytest.mark.parametrize("memory_map", [True, False])
def test_encoding_mmap(memory_map):
"""
encoding should be working, even when using a memory-mapped file.
GH 23254.
"""
encoding = "iso8859_1"
data = BytesIO(" 1 A Ä 2\n".encode(encoding))
df = pd.read_fwf(
data,
header=None,
widths=[2, 2, 2, 2],
encoding=encoding,
memory_map=memory_map,
)
data.seek(0)
df_reference = DataFrame([[1, "A", "Ä", 2]])
tm.assert_frame_equal(df, df_reference)
| bsd-3-clause |
massmutual/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the 1-of-K
        coding scheme.
Parameters
----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
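        Examples
        --------
        An illustrative round trip; the fit mirrors the class-level example
        above, and the recovered labels shown assume that same fit:
        >>> import numpy as np
        >>> from sklearn import preprocessing
        >>> lb = preprocessing.LabelBinarizer()
        >>> lb.fit([1, 2, 6, 4, 2])
        LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
        >>> lb.inverse_transform(np.array([[1, 0, 0, 0], [0, 0, 0, 1]]))
        array([1, 6])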
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
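        Examples
        --------
        A small illustrative round trip (same data as the class-level example,
        which the recovered label sets assume):
        >>> import numpy as np
        >>> mlb = MultiLabelBinarizer()
        >>> mlb.fit_transform([(1, 2), (3,)])
        array([[1, 1, 0],
               [0, 0, 1]])
        >>> mlb.inverse_transform(np.array([[0, 0, 1], [1, 1, 0]]))
        [(3,), (1, 2)]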
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
xzackli/isocurvature_2017 | analysis/plot_derived_parameters/plot_function.py | 1 | 3624 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from scipy.stats import gaussian_kde
from pprint import pprint
import sys
import os
from astropy.io import ascii
from astropy.table import vstack
# THIS FILE: UTILITY FUNCTIONS FOR PLOTTING!
def loadChainFolder(chainfolder):
for filename in os.listdir(chainfolder):
if '.paramnames' in filename:
paramfile = os.path.join(chainfolder, filename)
# print(paramfile)
params = np.array(ascii.read(paramfile,delimiter="\t", format="no_header"))['col1']
# print(params)
data_all = None
# print(chainfolder)
for filename in os.listdir(chainfolder):
if filename.endswith(".txt"):
chainfile = os.path.join(chainfolder, filename)
# print(chainfile)
data = (ascii.read(chainfile, delimiter="\s"))[100:]
# set up column names (read in from param file)
data['col1'].name = 'acceptance'
data['col2'].name = 'likelihood'
for i in range(3,len(params)+3):
data['col' + str(i)].name = params[i-3]
			if data_all is None:
data_all = data
else:
data_all = vstack( [data_all, data] )
# print(len(data), len(data_all))
return data_all
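# Usage sketch for loadChainFolder (the folder name below is hypothetical).
# It expects a chain folder containing one *.paramnames file plus one or more
# *.txt chain files, and returns an astropy Table whose first two columns are
# 'acceptance' and 'likelihood', followed by the parameter columns:
#   data = loadChainFolder("chains/run1/")
#   print(data.colnames)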
def repeatRows(data, acceptance):
newData = data[:]
# for each row in data, add rows based on acceptance number
for row, acc in zip(data, acceptance):
for i in range(acc-1):
newData.append( row )
return newData
def denplot( list_data, ax, acc, name="data", \
lower=0.0, upper=0.25, nbins=20, extend=False, \
extent=0.1, cov=0.2, fmt="k-", mylabel="label" ):
# print("repeating")
# list_data = np.array(list_data).tolist()
# list_data = repeatRows(list_data, acc)
# list_data = np.array(list_data)
x = np.linspace(lower, upper, 300)
# new_weights = data['acceptance']
if extend:
new_list_data = np.hstack( (list_data,-list_data) )
density = gaussian_kde(new_list_data)
else:
density = gaussian_kde( list_data )
density.covariance_factor = lambda : cov
density._compute_covariance()
ax.plot( x, density(x) / np.max(density(x)), fmt, label=mylabel )
# counts, bins = np.histogram( list_data, bins=x, weights=new_weights, density=True )
#ax.plot( x[:-1], counts, "r." )
ax.get_yaxis().set_ticks([])
# ax.set_ylim( 0.0, counts.max() )
ax.set_xlim( lower, upper )
ax.set_xlabel( name )
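# Usage sketch for denplot (the data and axis below are placeholders):
#   fig, ax = plt.subplots()
#   denplot(np.random.rand(500), ax, acc=None, name=r"$\beta$", lower=0.0, upper=1.0)
# The curve is a Gaussian KDE rescaled to unit peak height, which is why the
# y-axis ticks are removed.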
def plotRow(data, ax1, ax2, ax3, ax4, c, mylabel):
prr1 = data['P_{RR}^1']; pii1 = data['P_{II}^1']; pri1 = data['P_{RI}^1'];
prr2 = data['P_{RR}^2']; pii2 = data['P_{II}^2']; pri2 = pri1 * np.sqrt(pii2 * prr2 / (pii1 * prr1))
# make density plot
# sc(x,y)
beta_iso1 = pii1 / (prr1 + pii1)
beta_iso2 = pii2 / (prr2 + pii2)
alpha = pri1 / np.sqrt( pii1 * prr1 )
	# spectral indices: n_{AB} = \frac{\log(P_{AB}^2 / P_{AB}^1)}{\log(k_2 / k_1)}
k1 = 0.002 # Mpc^{-1}
k2 = 0.1 # Mpc^{-1}
nRR = np.log(prr2/prr1) / np.log(k2/k1)
nRI = np.log(pri2/pri1) / np.log(k2/k1)
nII = np.log(pii2/pii1) / np.log(k2/k1)
denplot( beta_iso1, ax1, data['acceptance'], r"$\beta_{iso}(k_{low})$", 0.0, 0.1, extend=True, fmt=c )
denplot( beta_iso2, ax2, data['acceptance'], r"$\beta_{iso}(k_{high})$", 0.0, 0.8, extend=True, fmt=c)
denplot( alpha, ax3, data['acceptance'], r"$\cos \Delta$", -0.5, 0.5, fmt=c)
denplot( nII, ax4, data['acceptance'], r"$n_{II}$", -1.0, 2.8, fmt=c, mylabel=mylabel )
ax4.legend()
| mit |
jasonzliang/kick_classifier | visualize_localization_error.py | 1 | 5914 | #!/usr/bin/python
'''
Created on Feb 26, 2014
@author: jason
'''
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib
import glob
import random
import networkx as nx
np.set_printoptions(suppress=True, formatter={'all':lambda x: str(x) + ','}, linewidth=150)
colors=('k','y','m','c','b','g','r','#aaaaaa')
linestyles=('-','--','-.',':')
styles=[(color,linestyle) for linestyle in linestyles for color in colors]
numPlots = 6
def parseFile(name, mode=0):
f = open(name)
x = np.zeros(40)
y = np.zeros(60)
loc = []
std = []
counter = 0
for i, line in enumerate(f):
if i == 0:
key = line.rstrip()
continue
counter +=1
rawdata = line.rstrip().split()
# print rawdata
locError, angleError, avgStd = float(rawdata[4]), float(rawdata[7]), float(rawdata[9])
loc.append(locError)
std.append(avgStd)
if int(locError/0.25) < 40:
x[int(locError/0.25)] += 1
if int(angleError) < 60:
y[int(angleError)] += 1
x = x/counter
y = y/counter
if mode == 0:
return x,y,key
else:
return np.array(loc), np.array(std)
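# Note on parseFile: with mode=0 it returns normalised histograms (0.25 m bins
# for location error, 1 degree bins for yaw error) plus the legend key read
# from the file's first line; with any other mode it instead returns the raw
# per-line localization errors and average standard deviations.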
def drawHistogram(data, keys, prefix=""):
matplotlib.rcParams.update({'font.size': 18})
plt.figure(figsize=(8, 7))
plt.title('Localization Error Performance')
plt.ylabel('CDF')
plt.xlabel('Accuracy in Meters')
x = np.arange(40)*0.25
for num, data in enumerate(zip(data, keys)):
datum,key = data
if key != 'noplot':
plt.plot(x, np.cumsum(datum), label=key, linewidth=1, color=styles[num][0],ls=styles[num][1])
plt.legend(loc=4)
plt.grid()
plt.savefig(prefix + "locError.png", bbox_inches='tight', dpi=300)
plt.clf()
def drawHistogram2(data, keys, prefix=""):
matplotlib.rcParams.update({'font.size': 18})
plt.figure(figsize=(8, 7))
plt.title('Yaw Error Performance')
plt.ylabel('CDF')
plt.xlabel('Accuracy in Degrees')
x = np.arange(60)
for num,data in enumerate(zip(data, keys)):
datum,key = data
if key != 'noplot':
plt.plot(x, np.cumsum(datum), label=key, linewidth=1, color=styles[num][0],ls=styles[num][1])
plt.legend(loc=4)
plt.grid()
plt.savefig(prefix + "angleError.png", bbox_inches='tight', dpi=300)
plt.clf()
def main():
data = []; keys = []; data2 = []
for i in xrange(numPlots):
name = "l" + str(i+1) + '.txt'
x,y,k = parseFile(name)
data.append(x); keys.append(k); data2.append(y)
drawHistogram(data, keys)
drawHistogram2(data2, keys)
def parseFile2(filename, keys=None, data1=None, data2=None, counter=0):
	if keys is None:
keys = []; data1 = []; data2 = []
for i in xrange(numPlots):
data1.append(np.zeros(40))
data2.append(np.zeros(60))
f = open(filename)
lines = f.readlines()
counter += len(lines) - 1
locData = np.zeros((len(lines) - 1, numPlots))
angleData = np.zeros((len(lines) - 1, numPlots))
for i, line in enumerate(lines):
if i == 0:
keys = line.rstrip()[:-1].split(';')
continue
rawdata = line.rstrip()[:-1].split(';')
for j in xrange(0, numPlots*2, 2):
locError, angError = float(rawdata[j]), float(rawdata[j+1])
if np.isnan(locError) or np.isnan(angError):
continue
locData[i-1, j/2] = locError
angleData[i-1, j/2] = angError
if int(locError/0.25) < 40:
x = data1[j/2]
x[int(locError/0.25)] += 1.0
if int(angError) < 60:
y = data2[j/2]
y[int(angError)] += 1.0
return keys, data1, data2, counter, np.mean(locData,axis=0), np.mean(angleData,axis=0)
def multFiles(basedir="old/results_4_5-3"):
listoffiles = glob.glob(basedir+"/*.txt")
locAvgErrors = np.zeros((len(listoffiles), numPlots))
t_test = np.zeros((numPlots, numPlots))
ks_test = np.zeros((numPlots, numPlots))
keys = []; data1 = []; data2 = []; counter = 0
for i in xrange(numPlots):
data1.append(np.zeros(40))
data2.append(np.zeros(60))
for i, name in enumerate(listoffiles):
print "parsing file #" + str(i) + ":" + name
keys, data1, data2, counter, avgError, avgAngleError = parseFile2(name, keys, data1, data2, counter)
locAvgErrors[i, :] = avgError
for i in xrange(numPlots):
data1[i] = data1[i]/counter
data2[i] = data2[i]/counter
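	# Pairwise statistics between the numPlots configurations: entry [i, j]
	# holds the p-value of a paired t-test on the per-file mean location
	# errors and of a two-sample KS test on the binned error distributions.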
for i in xrange(numPlots):
for j in xrange(numPlots):
t_test[i,j] = stats.ttest_rel(locAvgErrors[:,i], locAvgErrors[:,j])[1]
ks_test[i,j] = stats.ks_2samp(data1[i], data1[j])[1]
#hack override
# keys = ["p-1000,l-11","p-500,l-11","p-200,l-11","p-1000,l-3","p-500,l-3","p-200,l-3"]
# keys =['p1000,l3', 'p1000,l2', 'p1000,l1', 'p500,l3', 'p500,l2', 'p500,l1']
# keys = ['p500,l3,ns', 'p500,l2,ns', 'p500,l1,ns','p500,l3,s2', 'p500,l2,s2', 'p500,l1,s2']
keys = ['noplot', 'noplot', 'no line info', '3 lines', '2 lines', '1 line']
# keys = ['original', 'p1000,l1,s1', 'p500,l11,s2', 'p500,l3,s2', 'p500,l2,s2', 'p500,l1,s2', 'p500,l11,s1', 'p1000,l11,s2']
compare = [(0,3), (1,2), (4,5), (0,1)]
for a,b in compare:
print keys[a] + " <-> " + keys[b], t_test[a,b], ks_test[a,b]
drawHistogram(data1, keys, prefix=basedir)
drawHistogram2(data2, keys, prefix=basedir)
def singleFile():
keys, data1, data2, counter, avgError, avgAngleError = parseFile2("localizationError.txt")
print "total measurements: " + str(counter)
print keys
for i in xrange(numPlots):
data1[i] = data1[i]/counter
data2[i] = data2[i]/counter
drawHistogram(data1, keys)
drawHistogram2(data2, keys)
def drawAvgStdvsLocError(name='localizationError.txt'):
loc,std = parseFile(name,mode=1)
plt.title('LocError vs avgStd Scatter')
plt.xlabel('avgStd')
plt.ylabel('LocError')
plt.scatter(std, loc)
plt.grid()
plt.savefig("scatter.png")
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
multFiles(sys.argv[1])
else:
multFiles() | mit |
asche1/LongHCPulse | LongHCPulse.py | 1 | 54775 | # Code to extract heat capacity from a long pulse in PPMS measurement
# Allen Scheie
# August, 2016
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import sys
from scipy.interpolate import interp1d
from scipy.interpolate import CubicSpline
import copy
import pickle
cc = matplotlib.colors.ColorConverter()
class LongHCPulse:
"""For importing long pulse heat capacity data"""
def __init__(self,datafile,calfile=None,sampmass=None,molarmass=1,scaleshortpulse=1,
AdiabaticCriterion=0.1):
# If only rawfile is specified, it assumes that you're feeding it a pickle object
print('**************** LongHCPulse v 1.3.3 *****************\n'+\
' please cite https://doi.org/10.1007/s10909-018-2042-9\n'+\
'******************************************************')
if ((calfile == None and sampmass == None) and datafile.endswith('.pickle')):
with open(datafile, 'rb') as pf:
pickdict = pickle.load(pf)
self._importPickle(**pickdict)
else: self._importData(datafile,calfile,sampmass,molarmass,scaleshortpulse,
AdiabaticCriterion)
def _importPickle(self, **entries):
'''file must have been saved with savefile function'''
self.__dict__.update(entries)
def _importData(self,rawfile,calfile,sampmass,molarmass,scaleshortpulse,
AdiabaticCriterion):
# Import thermal conductivity and thermometer resistivity values from calibration file
print(" - Importing data...")
self.importCalibration(calfile)
# Get the number of points per heating/cooling curve, and the number of curves in the file
numpts, numcurves = self._countpoints(rawfile)
DataHeader = False
DataLines = False #index for keeping track of when data begins in file
i = 0 # Index for keeping track of data points
		j = -1 # Index for keeping track of heating curves (incremented at the start of each pulse)
self.rawdata = np.empty((numcurves,3,numpts,2))*np.nan # Array which holds all raw data in file
# First index: pulse number
# Second index: data type (Time, Sample Temperature, Heater Power)
# Third index: actual data points
# Fourth index: 0 = heating curve, 1 = cooling curve
self.Tb = np.empty((numcurves))*np.nan # Array of system temperatures
self.Tsamp = np.empty((numcurves))*np.nan # Array of average sample temperatures
self.Tstart = np.empty((numcurves))*np.nan
self.Bfield = np.empty((numcurves))*np.nan # Array for holding magnetic field
self.ShortPulse = np.zeros((numcurves)) # gives HC if pulse is a short pulse, zero if long pulse
self.AddendaHC = np.zeros((2,numcurves)) # Subtracted from HC at the very end.
self.ThermCondWire = np.zeros((2,numcurves)) # Used to compare
##self.ThermCondWire = []
# Import data
for line in open(rawfile):
if DataLines==True:
d = line.strip('\n').split(",")
if d[1] == '': #only accept lines for which there are no comments
if d[4] == '0':
kk = 1 # If heater power is zero, it's a cooling curve.
self.rawdata[j,0,i,kk] = d[0] #time (s)
#self.rawdata[j,1,i,kk] = d[3] #Temp (K) BAD!! UNCORRECTED!!
self.rawdata[j,2,i,kk] = d[4] #Heater Power (W)
self.rawdata[j,1,i,kk] = self._resisToTemp(float(d[2]), self.Bfield[j])
i+=1
else:
kk = 0 # heating curve.
self.rawdata[j,0,ii,kk] = d[0] #time (s)
#self.rawdata[j,1,ii,kk] = d[3] #Temp (K) BAD!! UNCORRECTED!!
self.rawdata[j,2,ii,kk] = d[4] #Heater Power (W)
self.rawdata[j,1,ii,kk] = self._resisToTemp(float(d[2]), self.Bfield[j])
# Attempt to correct heating pulses for improper power values. Didn't work.
# Assumes that voltage is measured across heater
startT = self.rawdata[j,1,0,0]
#self.rawdata[j,2,ii,kk] *= self._HeaterRes(startT) /\
# self._HeaterRes(self.rawdata[j,1,ii,kk])
ii +=1
# find the information needed to compute heat capacity
if DataLines==False:
if "SystemTemp=" in line:
self.Tb[j] = float(line.strip('\n').split(',')[1][11:])
# Note that the log files typically show this to be constant over each pulse
#if "StableStartTemperature=" in line:
# self.Tb[j] = float(line.split(',')[1][len("StableStartTemperature="):-2])
if "TempMinMidMax=" in line:
self.Tstart[j] = float(line.strip('\n').split(',')[1][len("TempMinMidMax="):].split('|')[0])
if "Field=" in line:
self.Bfield[j] = round(float(line.strip('\n').split(',')[1][6:]),0)
if "SampHC=" in line:
SampHC = float(line.strip('\n').split(',')[1][7:])
# determine if it's long pulse or short pulse
if "SampleTemp=" in line:
self.Tsamp[j] = float(line.strip('\n').split(',')[1][11:])
if "TempRise=" in line:
TempRise = float(line.strip('\n').split(',')[1][9:])
Tratio = TempRise / self.Tsamp[j]
# CRITERION FOR ADIABATIC PULSE: Trise/Tsamp < AdiabaticCriterion
# If this is true, then save the sample heat capacity. If not, value = 0
if ("SampHC=" in line and Tratio < AdiabaticCriterion):
self.ShortPulse[j] = float(line.strip('\n').split(',')[1][7:])
if "AddendaHC=" in line:
self.AddendaHC[1,j] = float(line.strip('\n').split(',')[1][10:]) # microJ/K
self.AddendaHC[0,j] = self.Tsamp[j]
# Convert to J/K from microJ/K
self.AddendaHC[1,j] *= 1e-6
if "ThermCondWire=" in line: #and Tratio < AdiabaticCriterion):
self.ThermCondWire[1,j] = float(line.strip('\n').split(',')[1][len("ThermCondWire="):])
self.ThermCondWire[0,j] = self.Tsamp[j]
#self.ThermCondWire.append( [ self.Tsamp[j],
# float(line.split(',')[1][len("ThermCondWire="):-2]) ])
# These two statements check for data lines
if "END:PULSE:PARAMS" in line:
DataLines=True
# prepare to take data
i=0
ii=0
if "BEGIN:PULSE:PARAMS" in line:
j+=1
DataLines=False
# Print progress
sys.stdout.write('\r %d%% ' % (100*j/numcurves))
sys.stdout.flush() # important for printing progress
print("\r 100%")
# Smooth data
self._smooth(n=5)
self.sampmass = sampmass
self.molarmass = molarmass
# Set all "zero-field values" to zero
self.Bfield[np.where(np.around(self.Bfield,0) == 0)] = 0
# Round all Bfield values to nearest 10 Oe
self.Bfield = np.around(self.Bfield,-1)
# Scale short pulse by user-provided factor (depends on how the measurement
# was taken).
self.ShortPulse *=scaleshortpulse
self.shortPulseAverage()
# Set scaled Bfield to be Bfield (this is changed if a demagnetization factor
# is applied.)
self.ScaledBfield = copy.deepcopy(self.Bfield)
#Define label array for later
self.labels = []
self.shortpulselabels = []
self.Bflag = []
self.shortpulseBflag = []
self.Blabels = []
def _countpoints(self, infile):
numberpoints = 0
numberpulses = 0
for line in open(infile):
if "NBinsOn" in line:
pointson = int(line.strip('\n').split(",")[1][8:])
numberpulses += 1
if "NBinsOff" in line:
pointsoff = int(line.strip('\n').split(",")[1][9:])
if (pointson) > numberpoints:
numberpoints = pointson
return numberpoints, numberpulses
def _smooth(self, n):
'''Take moving average of heating and cooling pulses'''
self.smoothedData = np.zeros((len(self.rawdata[:,1,:,:]),3,len(self.rawdata[0,1,:,:]),2))
for i in range(len(self.rawdata[:,1,:,:])):
self.smoothedData[i,1,:,0] = self._movingaverage(self.rawdata[i,1,:,0],n)
self.smoothedData[i,1,:,1] = self._movingaverage(self.rawdata[i,1,:,1],n)
self.smoothedData[i,0,:,:] = self.rawdata[i,0,:,:]
def heatcapacity(self, smoothlevel=0, StaticOffset = 0.1):
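		# Point-by-point, each pulse is converted to heat capacity roughly as
		#   C(T_s) = (P_heater - [integral of K_w dT from T_b to T_s
		#             + StaticOffset*K_w(T_b)*(T_s - T_b)]) / (dT_s/dt) - C_addenda,
		# then scaled from J/K to J/K/mol at the end.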
print(" - Computing Heat Capacity...")
# Initialize arrays
lenSD = len(self.smoothedData)
self.HC = np.zeros((lenSD, len(self.smoothedData[0,0]),2))
self.T = np.zeros((lenSD, len(self.smoothedData[0,0]),2))
self.HC_uncertainty = np.zeros((lenSD, len(self.smoothedData[0,0]),2))
# Sort the addenda heat capacity curve (so it can be interpolated)
self.AddendaHC = self.AddendaHC[:,self.AddendaHC[0].argsort()]
self.ThermCondWire = self.ThermCondWire[:,self.ThermCondWire[0].argsort()]
SData = self.smoothedData #create convenient shorthand for smoothed data
# Loop through all curves and compute heat capacity
for i in range(lenSD):
maxT = self.rawdata[i,1,-1,0] #maximum temperature reached in heating pulse
for k in range(2):
for j in range(1,len(self.smoothedData[i,0,:,0])-2):
Ts = self.smoothedData[i,1,j,k]
Ph = self.rawdata[i,2,j,k]
# Let thermal conductivity be the integral of conductivity w.r.t. temperature
# plus a static offset parameter (as defined by eq. 4.7b in the PPMS manual)
KwIntegral = self._WireCondIntegral(self.Tb[i], Ts)
KwdT = KwIntegral + self._WireCond(self.Tb[i])*(Ts- self.Tb[i])*StaticOffset
# compute dT/dt using 2nd order central finite difference
dTdt = (8.*(SData[i,1,j+1,k]- SData[i,1,j-1,k]) - (SData[i,1,j+2,k]- SData[i,1,j-2,k]) )/\
(12.*(SData[i,0,j,k]-SData[i,0,j-1,k]))
# compute dT/dt using 1st order central finite difference
#dTdt = (SData[i,1,j+1,k]- SData[i,1,j-1,k])/\
# (2*(SData[i,0,j,k]-SData[i,0,j-1,k]))
# compute heat capacity
#################################################
SData[i,2,j,k] = (-KwdT + Ph)/dTdt
#################################################
# subtract addenda
SData[i,2,j,k] -= self._addenda(Ts)
###############################
# Compute uncertainty
###############################
deltaTb = 0.0001
deltaTs = 0.00003
if k == 0 : deltaP = 0.1e-12 # typically negligible
else: deltaP = 0
# We approximate deltaKw from the spread of any short pulse data that exists.
try: deltaKw = np.interp(Ts, self.avgThermCondWire[0], self.avgThermCondWire[2])
except AttributeError: deltaKw = 4e-10 # <-- Dominant term
deltaS = StaticOffset*0.1
#######################################
self.HC_uncertainty[i,j,k] = 1/dTdt * np.sqrt(
((1-StaticOffset)*self._WireCond(self.Tb[i])* deltaTb)**2 +\
((1+StaticOffset)*(Ts - self.Tb[i])* deltaKw)**2 +\
deltaP**2 +\
(self._WireCond(self.Tb[i])*(Ts - self.Tb[i])*deltaS)**2 +\
((self._WireCond(Ts)+StaticOffset*self._WireCond(self.Tb[i]))**2 +\
(65*SData[i,2,j,k]**2)/
(72*(SData[i,0,j,k]-SData[i,0,j-1,k])**2)) * deltaTs**2
)
####################################################
#Eliminate values too close to max T or min T
if (((Ts - self.Tb[i])/self.Tb[i] < 0.1) or ((Ts - self.Tb[i]) < 0.025)):
SData[i,2,j,k] = np.nan
if k == 1: #Eliminate values too close to max T on heating
if (maxT - Ts)/(maxT-self.Tb[i]) < 0.15:
SData[i,2,j,k] = np.nan
elif k == 0: #Eliminate values to close to min T on cooling
if (maxT - Ts)/(maxT-self.Tb[i]) < 0.16:
SData[i,2,j,k] = np.nan
# first two and last two data points can't have value: no n-1 term
SData[i,2,0,k] = np.nan
SData[i,2,1,k] = np.nan
SData[i,2,-1,k] = np.nan
SData[i,2,-2,k] = np.nan
SData[i,2,-3,k] = np.nan
# eliminate the first few points from a heating pulse (never reliable)
if k ==0: SData[i,2,2:5,k] *= np.nan
# Apply this same elimination to uncertainty
self.HC_uncertainty[i,np.argwhere(np.isnan(SData[i,2,:,k])),k] *= np.nan
# Take moving average of data
self.T[i,:,k] = SData[i,1,:,k]
self.HC[i,:,k] =SData[i,2,:,k]
#self.HC[i,:,k] = self._movingaverage( SData[i,2,:,k], smoothlevel)
#Print progress
sys.stdout.write('\r %d%% ' % (100*i/lenSD))
sys.stdout.flush() # important
# Convert to J/K/mol from J/K
self.HC *= self.molarmass/(self.sampmass*1e-3)
self.HC_uncertainty *= self.molarmass/(self.sampmass*1e-3)
print("\r 100%")
def shortPulseAverage(self):
"""Combine short pulse data points taken at the same temperatures. Threshold
is defined in terms of percent change. Adjust this as necessary."""
threshold = 0.01
avgSpHc = [[]]
avgSpT = [[]]
avgTCW = []
Bindex = 0
# Initialize the first values:
firstnonzero = next((i for i, x in enumerate(self.ShortPulse) if x), None)
if firstnonzero is None:
self.avgSpHc = []
self.avgSpT = [[]]
self.avgSpB = [[]]
self.ScaledavgSpB = [[]]
return 0
Bprev = self.Bfield[firstnonzero]
avgSpB = [Bprev]
Tprev = round(self.Tsamp[firstnonzero],3)
Tavg = [self.Tsamp[firstnonzero]]
Cavg = [self.ShortPulse[firstnonzero]]
KWavg = [self.ThermCondWire[1,firstnonzero]]
for i, sp in enumerate(self.ShortPulse):
if sp != 0:
if self.Bfield[i] not in avgSpB:
avgSpHc[Bindex].append(np.mean(Cavg))
avgSpT[Bindex].append(np.mean(Tavg))
avgTCW.append([np.mean(Tavg), np.mean(KWavg), np.std(KWavg), len(Tavg)])
Tavg = [self.Tsamp[i]]
Cavg = [sp]
KWavg = [self.ThermCondWire[1,i]]
# start new data set
Tprev = np.mean(Tavg)
avgSpB.append(self.Bfield[i])
avgSpHc.append([])
avgSpT.append([])
Bindex = np.where(avgSpB == self.Bfield[i])[0][0]
else:
if np.abs((self.Tsamp[i] - Tprev)/Tprev) < threshold:
Tavg.append(self.Tsamp[i])
Cavg.append(sp)
KWavg.append(self.ThermCondWire[1,i])
else:
avgSpHc[Bindex].append(np.mean(Cavg))
avgSpT[Bindex].append(np.mean(Tavg))
avgTCW.append([np.mean(Tavg), np.mean(KWavg), np.std(KWavg), len(Tavg)])
Tavg = [self.Tsamp[i]]
Cavg = [sp]
KWavg = [self.ThermCondWire[1,i]]
Tprev = np.mean(Tavg)
#Bprev = self.Bfield[i]
#Tprev = round(self.Tsamp[i],3)
avgSpHc[Bindex].append(np.mean(Cavg))
avgSpT[Bindex].append(np.mean(Tavg))
avgTCW.append([np.mean(Tavg), np.mean(KWavg), np.std(KWavg), len(Tavg)])
self.avgSpHc = np.array(avgSpHc)
self.avgSpT = avgSpT
self.avgSpB = avgSpB
self.ScaledavgSpB = copy.deepcopy(self.avgSpB)
self.avgThermCondWire = np.array(avgTCW)
#sort the thermal conductivity
Tarrinds = self.avgThermCondWire[:,0].argsort()
self.avgThermCondWire = self.avgThermCondWire[Tarrinds].T
#self.avgThermCondWire[1] = self._movingaverage(self.avgThermCondWire[1], 2)
#self.avgThermCondWire[2] = self._movingaverage(self.avgThermCondWire[2], 2)
def importCalibration(self,calfile):
"""Imports thermal conductivity, heater resistivity,
and thermometer resistance from calibration file"""
lines = open(calfile,'r').readlines()
for ll in range(len(lines)):
line = lines[ll]
if "[Temp_Cond]" in line:
self.Kw = np.array(self._recordCalData(lines, ll)).T
elif "[Temp_HtrRes]" in line:
self.HtrRes = np.array(self._recordCalData(lines, ll)).T
# Import thermometer resistance at various fields (rest of function)
elif "[CalibrationFields]" in line:
self.CalFields = {'f0':0.0}
FieldNames = ['f0'] # there's always zero field
numfields = int(lines[ll+1].split('=')[1])
for i in range(numfields):
cf = lines[ll+2+i].split('=')
self.CalFields[cf[0]] = float(cf[1])
FieldNames.append(cf[0])
ThRes = {fn: [] for fn in FieldNames}
elif "[Temp_ThRes" in line:
if any([x in line for x in FieldNames[1:]]):
CalField = [x for x in FieldNames if x in line][0]
newdata = self._recordCalData(lines, ll)
ThRes[CalField].extend(newdata)
# Import zero field data
elif "f" not in line:
newdata = self._recordCalData(lines, ll)
ThRes['f0'].extend(newdata)
for f in FieldNames: # convert to sorted numpy array
ThRes[f] = np.array(ThRes[f])
ThRes[f] = ThRes[f][ThRes[f][:,0].argsort()].T
# Combine close repeated values in thermometer resistance
self.AvgThRes = {fn: [[],[]] for fn in FieldNames}
AvgThTemp = {fn: [[],[]] for fn in FieldNames}
threshold = 0.005 #(K)
for f in FieldNames:
Tavg = []
Ravg = []
Tprev = ThRes[f][0,0]
for i, Tem in enumerate(ThRes[f][0]):
if (np.abs(Tem-Tprev) <= threshold) | (np.abs(Tem-Tprev)/Tem <= threshold):
Tavg.append(Tem)
Ravg.append(ThRes[f][1,i])
else:
self.AvgThRes[f][0].append(np.mean(Tavg))
self.AvgThRes[f][1].append(np.mean(Ravg))
Tavg = [Tem]
Ravg = [ThRes[f][1,i]]
Tprev = Tem
# Add a few extra points at the end so the cubic spline doesn't get messed up
# atr = self.AvgThRes[f]
# beginslope = (atr[1][0] - atr[1][1])/\
# (atr[0][0] - atr[0][1])
# xbeg1 = atr[0][0] - (atr[0][1] - atr[0][0])
# xbeg2 = (atr[0][0] + xbeg1)/2.
# xbeg3 = (xbeg1 + xbeg2)/2.
# xbeg4 = (xbeg1 + xbeg3)/2.
# for x in [xbeg2, xbeg3, xbeg4, xbeg1]:
# ynew = beginslope*(x-atr[0][0]) + atr[1][0]
# self.AvgThRes[f][0].insert(0,x)
# self.AvgThRes[f][1].insert(0,ynew)
# endslope = (atr[1][-1] - atr[1][-2])/\
# (atr[0][-1] - atr[0][-2])
# xend1 = atr[0][-1] + (atr[0][-1] - atr[0][-2])
# xend2 = (atr[0][-1] + xend1)/2.
# xend3 = (xend2 + xend1)/2.
# xend4 = (xend2 + xend3)/2.
# for x in [xend4, xend2, xend3, xend1]:
# ynew = endslope*(x-atr[0][-1]) + atr[1][-1]
# self.AvgThRes[f][0].append(x)
# self.AvgThRes[f][1].append(ynew)
# Prepare functions for interpolation
self.ResTempFunc = {}
for f in FieldNames:
self.AvgThRes[f] = np.array(self.AvgThRes[f]) # Used for "_tempToResis"
# Smooth data (unhelpful)
#self.AvgThRes[f][0] = self._movingaverage( self.AvgThRes[f][0], 3)
AvgThTemp[f] = self.AvgThRes[f][:,self.AvgThRes[f][1].argsort()] # Used for "_resisToTemp"
# self.ResTempFunc[f] = interp1d(AvgThTemp[f][1], AvgThTemp[f][0], kind='cubic',
# bounds_error = False)
self.ResTempFunc[f] = CubicSpline(AvgThTemp[f][1], AvgThTemp[f][0],
bc_type = 'natural',extrapolate=True)
# plt.figure()
# plt.plot(AvgThTemp[FieldNames[0]][1], 1/AvgThTemp[FieldNames[0]][0], '.')
# xdat = np.linspace(AvgThTemp[FieldNames[0]][1][0],AvgThTemp[FieldNames[0]][1][-1], 1000)
# plt.plot(xdat, 1/self.ResTempFunc[FieldNames[0]](xdat))
def _recordCalData(self,datalines,start):
"""Used with the importCalibration function"""
count = int(datalines[start + 6].split('=')[1])
data = []
for i in range(start+7, start+7+count):
data.append([float(d) for d in datalines[i].strip('\n').strip(',').split(',')])
return data
def _movingaverage(self, datay, n):
"""Computes moving average of the y data set"""
if not (n & 0x1):
n+=1 # Force the average number to be odd (to keep x values accurate)
newdatay = np.convolve(datay, np.ones((n,))/n, mode='same')
for i in range(int((n-1)/2)):
newdatay[i] = np.average(datay[:(2*i)+1])
newdatay[-i-1] = np.average(datay[-(2*i)-1:])
return newdatay
def _movingaverage_xspacing(self, datax, datay, smooth, xSpacing):
"""Computes moving average of the y data set, but limited only to
data points under xSpacing distance."""
length = len(datay)
newy = np.zeros(length)
for i in range(length):
if (i-smooth) < 0:
llim = 0
ulim = 2*i
elif (i+smooth) >= length:
llim = 2*i - length
ulim = length-1
else:
llim = i-smooth
ulim = i+smooth
indexrange = np.arange(llim, ulim+1)
xspacerange = np.where((datax < datax[i]+xSpacing) & (datax > datax[i]-xSpacing))[0]
indices = np.intersect1d(indexrange, xspacerange)
#TODO: Force range to be symmetric about i
lowind, highind = indices[0], indices[-1]
if i-lowind < highind-i:
highind = i + (i-lowind)
elif i-lowind > highind-i:
lowind = i - (highind-i)
#print i, indexrange, xspacerange, lowind, highind
#print np.array(datay)[indices]
newy[i]= np.average(datay[lowind:highind+1])
return newy
def _WireCond(self,temp):
"""Returns the wire conductivity for a given temperature
(must run 'importCalibration' first)"""
tcw= self.Kw
return np.interp(temp, tcw[0], tcw[1])
# #atcw = self.avgThermCondWire
# if np.logical_and(temp < tcw[0,-1], temp>tcw[0,0]):
# return np.interp(temp, tcw[0], tcw[1])
# #If temp is not in range of avgThermCondWire, extrapolate last two points.
# elif temp < atcw[0,0]:
# x1, y1, x2, y2 = tcw[0,0], tcw[1,0], tcw[0,1], tcw[1,1]
# AA = (y2-y1)/(x2-x1)
# BB = y1-AA*x1
# return AA*temp + BB
# elif temp > atcw[0,-1]:
# x1, y1, x2, y2 = tcw[0,-2], tcw[1,-2], tcw[0,-1], tcw[1,-1]
# AA = (y2-y1)/(x2-x1)
# BB = y1-AA*x1
# return AA*temp + BB
# #return np.interp(temp, self.ThermCondWire[0], self.ThermCondWire[1])
def _WireCondIntegral(self, temp1, temp2):
"""Used for computing heat capacity"""
tcw = self.Kw
startK = self._WireCond(temp1)
endK = self._WireCond(temp2)
intermediate = np.where(np.logical_and(tcw[0]>temp1, tcw[0]<temp2))[0]
if len(intermediate) == 0:
Kint = 0.5*(endK + startK)*(temp2-temp1)
else:
interT = tcw[0][intermediate]
interK = tcw[1][intermediate]
# Integrate with the trapezoid rule
Kint = 0.5*(startK + interK[0])*(interT[0]-temp1)
for i in range(len(intermediate)-1):
Kint += 0.5*(interK[i] + interK[i+1])*(interT[i+1]-interT[i])
Kint += 0.5*(endK + interK[-1])*(temp2-interT[-1])
return Kint
def _HeaterRes(self,temp):
"""Returns the heater resistance for a given temperature
(must run 'importCalibration' first)"""
return np.interp(temp, self.HtrRes[0], self.HtrRes[1])
def _tempToResis(self, Temp, Field):
"""Returns the thermometer resistance for a given temperature"""
## Interpolate temperatures
FieldArray = np.array(list(self.CalFields.values()))
IntrpTRes = np.zeros(len(FieldArray))
for i,f in enumerate(FieldArray):
IntrpTRes[i] = np.interp(Temp, self.AvgThRes['f'+str(i)][0], self.AvgThRes['f'+str(i)][1])
## Interpolate resistance between two closest fields
return np.interp(Field, FieldArray, IntrpTRes)
def _resisToTemp(self, Resis, Field):
"""Used to compute temperature from thermometer resistance.
For long pulses, the PPMS calculates temperature incorrectly."""
## Interpolate temperatures
FieldArray = np.array(list(self.CalFields.values()))
IntrpTTemp = np.zeros(len(FieldArray))
for i,f in enumerate(FieldArray):
IntrpTTemp[i] = self.ResTempFunc['f'+str(i)](Resis)
## Interpolate resistance between two closest fields
return np.interp(Field, FieldArray, IntrpTTemp)
def _addenda(self,temp):
"""Returns the addenda Hc (in J/K) for a given temperature
(also works for an array of temperatures)"""
return np.interp(temp, self.AddendaHC[0], self.AddendaHC[1])
def _combineTraces(self,smooth, FieldBinSize=10, useHeatPulses=False, onlyHeatPulses=False):
"""Combines all heat capacity traces into a single line.
Used for lineplotCombine and Entropy calculations.
		FieldBinSize is in Oe; by default only cooling pulses are used."""
Barray = np.sort(list(set(self.Bfield)))
self.CombB = Barray
ScaledBarray = np.zeros(len(Barray))
for ii in range(len(Barray)):
B = Barray[ii]
Bindex = np.where(self.Bfield==B)[0][0]
ScaledBarray[ii] = self.ScaledBfield[Bindex]
#ScaledBarray = np.sort(list(set(np.around(self.ScaledBfield,1))))
self.ScaledCombB = ScaledBarray
if len(ScaledBarray) != len(Barray):
raise IndexError("ScaledBarray != Barray")
HCtoComb = []
TtoComb = []
self.CombHC = []
self.CombT = []
#plt.figure()
# Find where the short pulses are
LongPindices = np.where(self.ShortPulse==0)[0]
# loop through fields and combine traces
for jj in range(len(Barray)):
B = Barray[jj]
# find all indices where the magnetic field is of the specified value
Bindices = np.where(np.abs(self.Bfield - B)<FieldBinSize)[0]
# Take intersection of long pulses and field to eliminate short pulse data
Bindices = np.intersect1d(Bindices, LongPindices)
if len(Bindices) == 0:
continue # Skip if no data exists for this field
combinedHc = []
combinedT = []
# Average the points which overlap with another curve
for bi in Bindices:
if onlyHeatPulses: # Combine all heating pulses into a single trace
nonnan = np.where(~np.isnan(np.array(self.HC[bi][:,0])))
overlapdataHC = self.HC[bi][nonnan,0].flatten()
for bj in Bindices[np.where(Bindices != bi)]:
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,0].flatten(),
self.T[bj][:,0], self.HC[bj][:,0],
right = np.nan, left = np.nan)))
combinedHc.extend(np.nanmean(np.array(overlapdataHC), axis=0))
combinedT.extend(self.T[bi][nonnan,0].flatten())
else:
#Combine all cooling pulses into a single trace
nonnan = np.where(~np.isnan(np.array(self.HC[bi][:,1])))
overlapdataHC = self.HC[bi][nonnan,1].flatten()
for bj in Bindices[np.where(Bindices != bi)]:
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,1].flatten(),
self.T[bj][:,1][::-1], self.HC[bj][:,1][::-1],
right = np.nan, left = np.nan)))
if useHeatPulses: # include heating pulses
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,1].flatten(),
self.T[bj][:,0], self.HC[bj][:,0],
right = np.nan, left = np.nan)))
if useHeatPulses:
# Compute overlap with same index heating pulse
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,1].flatten(),
self.T[bi][:,0], self.HC[bi][:,0],
right = np.nan, left = np.nan)))
# Concatenate data to array, to be sorted later
combinedHc.extend(np.nanmean(np.array(overlapdataHC), axis=0))
combinedT.extend(self.T[bi][nonnan,1].flatten())
if useHeatPulses:
# Compute overlap with same index heating pulse
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,1].flatten(),
self.T[bi][:,0], self.HC[bi][:,0],
right = np.nan, left = np.nan)))
# Now repeat, but computing overlap with heating pulse
nonnan = np.where(~np.isnan(np.array(self.HC[bi][:,0])))
overlapdataHC = self.HC[bi][nonnan,0].flatten()
for bj in Bindices[np.where(Bindices != bi)]:
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,0].flatten(),
self.T[bj][:,1][::-1], self.HC[bj][:,1][::-1],
right = np.nan, left = np.nan)))
overlapdataHC = np.vstack((overlapdataHC,
np.interp(self.T[bi][nonnan,0].flatten(),
self.T[bj][:,0], self.HC[bj][:,0],
right = np.nan, left = np.nan)))
# Concatenate data to array, to be sorted later
combinedHc.extend(np.nanmean(np.array(overlapdataHC), axis=0))
combinedT.extend(self.T[bi][nonnan,0].flatten())
combinedHc = np.array(combinedHc)
combinedT = np.array(combinedT)
# Sort data by temperature
Tarrinds = combinedT.argsort()
combinedHc = combinedHc[Tarrinds]
combinedT = combinedT[Tarrinds]
AvgCombHc= []
AvgCombT = []
# Combine close repeated values
threshold = 0.002 #(K)
Tprev = combinedT[0]
HCprev = combinedHc[0]
i=1
while i < len(combinedT):
Tem = combinedT[i]
HCap = combinedHc[i]
if np.abs(Tem-Tprev) <= threshold:
AvgCombT.append(0.5*(Tem + Tprev))
AvgCombHc.append(0.5*(HCap + HCprev))
HCprev = combinedHc[i]
Tprev = combinedT[i]
i+=1
else:
AvgCombT.append(Tprev)
AvgCombHc.append(HCprev)
HCprev = combinedHc[i]
Tprev = combinedT[i]
i+=1
# Smooth data
AvgCombHc = self._movingaverage_xspacing(AvgCombT,AvgCombHc, smooth, 0.01)
# Append to object list
self.CombHC.append(AvgCombHc)
self.CombT.append(AvgCombT)
def _combineTracesOld(self,smooth, FieldBinSize=10):
"""Combines all heat capacity traces into a single line.
Used for lineplotCombine and Entropy calculations.
		FieldBinSize is in Oe"""
Barray = np.sort(list(set(self.Bfield)))
self.CombB = Barray
ScaledBarray = np.zeros(len(Barray))
for ii in range(len(Barray)):
B = Barray[ii]
Bindex = np.where(self.Bfield==B)[0][0]
ScaledBarray[ii] = self.ScaledBfield[Bindex]
#ScaledBarray = np.sort(list(set(np.around(self.ScaledBfield,1))))
self.ScaledCombB = ScaledBarray
if len(ScaledBarray) != len(Barray):
raise IndexError("ScaledBarray != Barray")
self.CombHC = []
self.CombT = []
# Find where the short pulses are
LongPindices = np.where(self.ShortPulse==0)[0]
# loop through fields and combine traces
for jj in range(len(Barray)):
B = Barray[jj]
# find all indices where the magnetic field is of the specified value
Bindices = np.where(np.abs(self.Bfield - B)<FieldBinSize)[0]
# Take intersection of long pulses and field to eliminate short pulse data
Bindices = np.intersect1d(Bindices, LongPindices)
if len(Bindices) == 0:
continue # Skip if no data exists for this field
# Concatenate data
combinedHc = self.HC[Bindices[0]][:,1]
combinedT = self.T[Bindices[0]][:,1]
for bb in Bindices[1:]:
combinedHc = np.hstack((combinedHc,self.HC[bb][:,1]))
combinedT = np.hstack((combinedT,self.T[bb][:,1]))
# Eliminate nan values
nonnan = np.where(~np.isnan(combinedHc))
combinedHc = combinedHc[nonnan]
combinedT = combinedT[nonnan]
# Sort data by temperature
Tarrinds = combinedT.argsort()
combinedHc = combinedHc[Tarrinds]
combinedT = combinedT[Tarrinds]
AvgCombHc= []
AvgCombT = []
# Combine close repeated values
threshold = 0.005 #(K)
Tprev = combinedT[0]
HCprev = combinedHc[0]
i=1
while i < len(combinedT):
Tem = combinedT[i]
HCap = combinedHc[i]
if np.abs(Tem-Tprev) <= threshold:
AvgCombT.append(0.5*(Tem + Tprev))
AvgCombHc.append(0.5*(HCap + HCprev))
HCprev = combinedHc[i]
Tprev = combinedT[i]
i+=1
else:
AvgCombT.append(Tprev)
AvgCombHc.append(HCprev)
HCprev = combinedHc[i]
Tprev = combinedT[i]
i+=1
# Smooth data
AvgCombHc = self._movingaverage_xspacing(AvgCombT,AvgCombHc, smooth, 0.01)
# Append to object list
self.CombHC.append(AvgCombHc)
self.CombT.append(AvgCombT)
# Functions called by user:
def plotHC(self,axes,index,heatingcolor=None,coolingcolor=None,shortpulsecolor=None,
Blabels=False, demag=True, marker = 'o', PlotUncertainty=False, **kwargs):
"""If the pulse is a long pulse, plot it. If it is a short pulse,
plot a single point. If you don't want to plot a curve, leave
		the color value as None."""
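		# e.g. (axes object and pulse index are placeholders):
		#   hc.plotHC(ax, 10, heatingcolor='r', coolingcolor='b')
		# plots the heating and cooling branches of pulse 10, or a single open
		# marker if that pulse was adiabatic (short).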
ShortHC = self.ShortPulse[index]
if ShortHC == 0:
if heatingcolor is not None:
if 'color' in kwargs:
axes.plot(self.T[index][:,0], self.HC[index][:,0],**kwargs)
else:
axes.plot(self.T[index][:,0], self.HC[index][:,0],color=heatingcolor, **kwargs)
if PlotUncertainty == True:
axes.fill_between(self.T[index][:,0],
self.HC[index][:,0]+ self.HC_uncertainty[index][:,0],
self.HC[index][:,0]- self.HC_uncertainty[index][:,0],
facecolor=[(2*i+0.5)/3. for i in cc.to_rgba(heatingcolor, alpha=0.5)],
edgecolor='none', interpolate=True)
if coolingcolor is not None:
if 'color' in kwargs:
axes.plot(self.T[index][:,1], self.HC[index][:,1], **kwargs)
else:
axes.plot(self.T[index][:,1], self.HC[index][:,1],color=coolingcolor, **kwargs)
if PlotUncertainty == True:
axes.fill_between(self.T[index][:,1],
self.HC[index][:,1]+ self.HC_uncertainty[index][:,1],
self.HC[index][:,1]- self.HC_uncertainty[index][:,1],
facecolor=[(2*i+0.5)/3. for i in cc.to_rgba(coolingcolor, alpha=0.5)],
edgecolor='none', alpha=0.3, interpolate=True)
else:
if shortpulsecolor is not None:
axes.plot(self.Tsamp[index],self.ShortPulse[index], marker=marker,
markeredgecolor=shortpulsecolor, markerfacecolor='none')
# Create labels for a data legend
if Blabels == False:
heating_curve = mlines.Line2D([], [], color=heatingcolor, label='Heating Curve', **kwargs)
cooling_curve = mlines.Line2D([], [], color=coolingcolor, label='Cooling Curve', **kwargs)
short_pulses = mlines.Line2D([], [], color=shortpulsecolor, marker=marker,
markeredgecolor=shortpulsecolor, markerfacecolor='none',
label='Adiabatic Pulses', linestyle="None")
self.labels = []
if heatingcolor is not None:
self.labels.append(heating_curve)
if coolingcolor is not None:
self.labels.append(cooling_curve)
if (shortpulsecolor is not None) and (ShortHC != 0):
self.labels.append(short_pulses)
if Blabels == True:
if demag == True:
Bvalue = self.ScaledBfield[index]
else:
Bvalue = self.Bfield[index]
if len(self.labels) == 0: self.Bflag = [] # This means labels has been reset
if Bvalue not in self.Bflag:
# Right now, it uses the cooling curve color for the label.
labl = str(abs(Bvalue)/10000)+' T'
#Blab = mlines.Line2D([], [], color=coolingcolor, label=labl)
self.Bflag.append(Bvalue)
if (coolingcolor is not None) and (heatingcolor is not None):
self.labels.append(mlines.Line2D([], [], color=coolingcolor,
label=labl + ' (cooling)'))
self.labels.append(mlines.Line2D([], [], color=heatingcolor,
label=labl+ ' (heating)'))
self.Bflag.append(Bvalue)
elif coolingcolor is not None:
self.labels.append(mlines.Line2D([], [], color=coolingcolor, label=labl))
elif heatingcolor is not None:
self.labels.append(mlines.Line2D([], [], color=heatingcolor, label=labl))
else:
self.labels.append(mlines.Line2D([], [], color=shortpulsecolor, label=labl))
#Sort Bflags, and make into array of labels
self.labels = [x for (y,x) in sorted(zip(self.Bflag,self.labels),
key=lambda pair: pair[0])]
self.Bflag = sorted(self.Bflag)
if (Bvalue not in self.shortpulseBflag and ShortHC != 0):
labl = str(Bvalue/10000)+' T'
self.shortpulseBflag.append(Bvalue)
if shortpulsecolor is not None:
self.shortpulselabels.append(mlines.Line2D([], [], color=shortpulsecolor,
marker='o', linestyle="None", markeredgecolor=shortpulsecolor,
markerfacecolor='none', label=labl))
self.shortpulselabels = [x for (y,x) in sorted(zip(self.shortpulseBflag,
self.shortpulselabels),
key=lambda pair: pair[0])]
self.shortpulseBflag = sorted(self.shortpulseBflag)
def plotHCT(self,axes,index,heatingcolor=None,coolingcolor=None,shortpulsecolor=None,
Blabels=False, demag=True, marker = 'o', PlotUncertainty=False, **kwargs):
"""If the pulse is a long pulse, plot it. If it is a short pulse,
plot a single point. If you don't want to plot a curve, leave
		the color value as None."""
ShortHC = self.ShortPulse[index]
if ShortHC == 0:
if heatingcolor is not None:
if 'color' in kwargs:
axes.plot(self.T[index][:,0], self.HC[index][:,0]/self.T[index][:,0], **kwargs)
else:
axes.plot(self.T[index][:,0], self.HC[index][:,0]/self.T[index][:,0],color=heatingcolor, **kwargs)
if PlotUncertainty == True:
axes.fill_between(self.T[index][:,0],
(self.HC[index][:,0]+ self.HC_uncertainty[index][:,0])/self.T[index][:,0],
(self.HC[index][:,0]- self.HC_uncertainty[index][:,0])/self.T[index][:,0],
facecolor=[(2*i+0.5)/3. for i in cc.to_rgba(heatingcolor, alpha=0.5)],
edgecolor='none', interpolate=True)
if coolingcolor is not None:
if 'color' in kwargs:
axes.plot(self.T[index][:,1], self.HC[index][:,1]/self.T[index][:,1], **kwargs)
else:
axes.plot(self.T[index][:,1], self.HC[index][:,1]/self.T[index][:,1],color=coolingcolor, **kwargs)
if PlotUncertainty == True:
axes.fill_between(self.T[index][:,1],
(self.HC[index][:,1]+ self.HC_uncertainty[index][:,1])/self.T[index][:,1],
(self.HC[index][:,1]- self.HC_uncertainty[index][:,1])/self.T[index][:,1],
facecolor=[(2*i+0.5)/3. for i in cc.to_rgba(coolingcolor, alpha=0.5)],
edgecolor='none', alpha=0.3, interpolate=True)
else:
if shortpulsecolor is not None:
axes.plot(self.Tsamp[index],self.ShortPulse[index]/self.Tsamp[index], marker=marker,
markeredgecolor=shortpulsecolor, markerfacecolor='none')
# Create labels for a data legend
if Blabels == False:
heating_curve = mlines.Line2D([], [], color=heatingcolor, label='Heating Curve', **kwargs)
cooling_curve = mlines.Line2D([], [], color=coolingcolor, label='Cooling Curve', **kwargs)
short_pulses = mlines.Line2D([], [], color=shortpulsecolor, marker=marker,
markeredgecolor=shortpulsecolor, markerfacecolor='none',
label='Adiabatic Pulses', linestyle="None")
self.labels = []
if heatingcolor is not None:
self.labels.append(heating_curve)
if coolingcolor is not None:
self.labels.append(cooling_curve)
if (shortpulsecolor is not None) and (ShortHC != 0):
self.labels.append(short_pulses)
if Blabels == True:
if demag == True:
Bvalue = self.ScaledBfield[index]
else:
Bvalue = self.Bfield[index]
if len(self.labels) == 0: self.Bflag = [] # This means labels has been reset
if Bvalue not in self.Bflag:
# Right now, it uses the cooling curve color for the label.
labl = str(abs(Bvalue)/10000)+' T'
#Blab = mlines.Line2D([], [], color=coolingcolor, label=labl)
self.Bflag.append(Bvalue)
if (coolingcolor is not None) and (heatingcolor is not None):
self.labels.append(mlines.Line2D([], [], color=coolingcolor,
label=labl + ' (cooling)'))
self.labels.append(mlines.Line2D([], [], color=heatingcolor,
label=labl+ ' (heating)'))
self.Bflag.append(Bvalue)
elif coolingcolor is not None:
self.labels.append(mlines.Line2D([], [], color=coolingcolor, label=labl))
elif heatingcolor is not None:
self.labels.append(mlines.Line2D([], [], color=heatingcolor, label=labl))
else:
self.labels.append(mlines.Line2D([], [], color=shortpulsecolor, label=labl))
#Sort Bflags, and make into array of labels
self.labels = [x for (y,x) in sorted(zip(self.Bflag,self.labels),
key=lambda pair: pair[0])]
self.Bflag = sorted(self.Bflag)
if (Bvalue not in self.shortpulseBflag and ShortHC != 0):
labl = str(Bvalue/10000)+' T'
self.shortpulseBflag.append(Bvalue)
if shortpulsecolor is not None:
self.shortpulselabels.append(mlines.Line2D([], [], color=shortpulsecolor,
marker='o', linestyle="None", markeredgecolor=shortpulsecolor,
markerfacecolor='none', label=labl))
self.shortpulselabels = [x for (y,x) in sorted(zip(self.shortpulseBflag,
self.shortpulselabels),
key=lambda pair: pair[0])]
self.shortpulseBflag = sorted(self.shortpulseBflag)
def lineplot(self,axes,Barray, plotHeatPulses=False, markers = ['s','^','o','x'], **kwargs):
""" Plots all the traces of long-pulse heat capacity and points of short-pulse
Currently uses 'gist_rainbow' colormap. If you don't like it, change it."""
if (Barray == 'All' or Barray == 'all'):
Barray = np.sort(list(set(self.Bfield)))
else:
Barray = np.array(Barray)
# determine the color map
#colormap = plt.cm.hsv((Barray*1.0)/Barray[-1]) * 0.6 #based on field
colormap = plt.cm.hsv(np.arange(len(Barray))*1.0/len(Barray))* 0.75 #based on index
colors = dict(zip(Barray, colormap))
for jj in range(len(self.Bfield)):
B = self.Bfield[jj]
for b in Barray:
if B == b:
self.plotHC(axes=axes,index=jj,coolingcolor=colors[B],Blabels=True, **kwargs)
if plotHeatPulses == True:
heatpulsecolor = colors[B]*0.6
heatpulsecolor[-1] = 0.9
self.plotHC(axes=axes,index=jj,heatingcolor=heatpulsecolor,Blabels=True, **kwargs)
# Plot short pulse data
if np.count_nonzero(self.ShortPulse) == 0: return
for jj, b in enumerate(Barray):
for i in range(len(self.avgSpB)):
spB = self.avgSpB[i]
if spB == b:
axes.plot(self.avgSpT[i], self.avgSpHc[i],color=colors[spB],
marker=markers[i%len(markers)],
markeredgecolor=colors[spB], markerfacecolor='none',
label='Adiabatic Pulses', linestyle="None")
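					# 'demag' is not a parameter of lineplot; when it is undefined, the NameError fallback below is used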
try:
if demag == True:
labl = str(abs(self.ScaledavgSpB[i])/10000.)+' T'
else: labl = str(round(self.avgSpB[i],1)/10000.)+' T'
except NameError: labl = str(round(self.avgSpB[i],1)/10000)+' T'
self.shortpulselabels.append(mlines.Line2D([], [], color=colors[spB],
marker=markers[i%len(markers)], linestyle="None", markeredgecolor=colors[spB],
markerfacecolor='none', label=labl))
def lineplotCombine(self,axes,Barray,smooth, demag=True, plotShortPulse=True,
markers = ['s','^','o','x'], FieldBinSize = 10, onlyHeatPulses=False,
useHeatPulses=False, **kwargs):
"""Combines all the heat capacity traces in a given field so that
there is only one line plotted"""
self.labels = []
if Barray in ['All', 'all']:
Barray = np.sort(list(set(self.Bfield)))
else:
Barray = np.array(Barray)
# determine the color map
colormap = plt.cm.hsv(np.arange(len(Barray))*1.0/len(Barray))* 0.75 #based on index
colors = dict(zip(Barray, colormap))
# Combine traces into single line
self._combineTraces(smooth, FieldBinSize, useHeatPulses, onlyHeatPulses)
# plot the long pulse data
for jj in range(len(Barray)):
B = Barray[jj]
if B == 0: B=0.0 # get rid of negative sign which may be there
# find all indices where the magnetic field is of the specified value
try:
Bindex = np.where(self.CombB==B)[0][0]
except IndexError:
continue
# Plot
if demag == True:
fieldval = round(self.ScaledCombB[Bindex]/10000 , 3)
if fieldval == 0: fieldval = 0.0
labl = str(fieldval)+' T'
else:
labl = str(Barray[jj]/10000.)+' T'
if ('color' in kwargs) or ('c' in kwargs) :
axes.plot(self.CombT[Bindex], self.CombHC[Bindex], **kwargs)
else:
axes.plot(self.CombT[Bindex], self.CombHC[Bindex], color=colors[B], **kwargs)
self.labels.append(mlines.Line2D([], [], color=colors[B], label=labl))
#Plot short pulse data
if np.count_nonzero(self.ShortPulse) == 0: return
if plotShortPulse == True:
edgewidth = 0.8
for jj, b in enumerate(Barray):
for i in range(len(self.avgSpB)):
spB = self.avgSpB[i]
if spB == b:
axes.plot(self.avgSpT[i], self.avgSpHc[i],color=colors[spB],
marker=markers[i%len(markers)], markeredgewidth = edgewidth,
markeredgecolor=colors[spB], markerfacecolor='none',
label='Adiabatic Pulses', linestyle="None")
if demag == True:
labl = str(round(abs(self.ScaledavgSpB[i])/10000 , 3))+' T'
else: labl = str(round(self.avgSpB[i],1)/10000)+' T'
self.shortpulselabels.append(mlines.Line2D([], [], color=colors[spB],
marker=markers[i%len(markers)], linestyle="None",
markeredgewidth = edgewidth, markeredgecolor=colors[spB],
markerfacecolor='none', label=labl))
def _computeEntropy(self,smooth):
'''computes entropy for all field values'''
Barray = np.sort(list(set(self.Bfield)))
#Combine traces into single line (if not done already)
try:
self.CombHC
except AttributeError:
print(" combining traces...")
self._combineTraces(smooth)
self.Entropy = np.zeros_like(self.CombT)
# loop through magnetic fields
for jj in range(len(Barray)):
B = Barray[jj]
if B == 0: B=0.0 # get rid of negative sign which may be there
# find all indices where the magnetic field is of the specified value
try:
Bindex = np.where(self.CombB==B)[0][0]
except IndexError:
continue # Skip if no data exists for this field
# Compute Entropy
T = self.CombT[Bindex]
C = self.CombHC[Bindex]
entropy = np.zeros(C.size)
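			# trapezoid-rule integration of C/T in T; the first term approximates the contribution from T=0 up to T[0]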
en = (T[0])*0.5*(C[0]/T[0])
entropy[0] = en
for i in range(0,C.size-1):
ds = (T[i+1]-T[i])*0.5*(C[i]/T[i]+C[i+1]/T[i+1])
en = en + ds
entropy[i+1] = en
self.Entropy[Bindex]=entropy
def plotEntropy(self, axes,Barray,smooth):
"""Plots entropy vs. T for various magnetic fields"""
self.entropylabels = []
if (Barray == 'All' or Barray == 'all'):
Barray = np.sort(list(set(self.Bfield)))
else:
Barray = np.array(Barray)
# determine the color map
colormap = plt.cm.hsv(np.arange(len(Barray))*1.0/len(Barray))* 0.85 #based on index
colors = dict(zip(Barray, colormap))
#Compute entropy (if not done already)
try:
self.Entropy
except AttributeError:
print(" computing entropy...")
self._computeEntropy(smooth)
# loop through magnetic fields
for jj in range(len(Barray)):
B = Barray[jj]
if B == 0: B=0.0 # get rid of negative sign which may be there
# find all indices where the magnetic field is of the specified value
try:
Bindex = np.where(self.CombB==B)[0][0]
except IndexError:
continue # Skip if no data exists for this field
# Plot
fieldval = round(self.ScaledCombB[Bindex]/10000 , 3)
if fieldval == 0: fieldval = 0.0
labl = str(fieldval)+' T'
axes.plot(self.CombT[Bindex], self.Entropy[Bindex], color=colors[B])
self.entropylabels.append(mlines.Line2D([], [], color=colors[B], label=labl))
def meshgrid(self,Tarray,Barray, useHeatPulses=False):
"""Tarray is the array of x values to be binned to
Barray is the array of magnetic fields to loop through
Set Barray to 'all' if you want to plot all the fields."""
if (Barray == 'All' or Barray == 'all'):
Barray = np.sort(list(set(self.Bfield)))
# Create array of scaledB:
ScaledBarray = np.zeros(len(Barray))
for ii in range(len(Barray)):
B = Barray[ii]
Bindex = np.where(self.Bfield==B)[0][0]
ScaledBarray[ii] = np.around(self.ScaledBfield[Bindex],1)
Intensity = np.empty((len(Tarray)-1,len(Barray)))*np.nan
for ii in range(len(Barray)):
B = Barray[ii]
# find all indices where the magnetic field is of the specified value
Bindices = np.where(self.Bfield==B)[0]
if len(Bindices) == 0:
continue # Skip if no data exists for this field
# Concatenate data into single array
binnedhc = self.binAndInterpolate(Tarray, self.T[Bindices[0]][:,1],
self.HC[Bindices[0]][:,1])
for bb in Bindices:
if self.ShortPulse[bb] != 0:
continue # skip this value if it is a short pulse
newbinnd = self.binAndInterpolate(Tarray, self.T[bb][:,1] , self.HC[bb][:,1])
binnedhc = np.vstack((binnedhc,newbinnd))
if useHeatPulses:
newbinnd = self.binAndInterpolate(Tarray, self.T[bb][:,0] , self.HC[bb][:,0])
binnedhc = np.vstack((binnedhc,newbinnd))
# Take average along given axis
AvgBinnedHc = np.nanmean(binnedhc, axis=0)
# Bin into x array
Intensity[:,ii] = AvgBinnedHc
#define edges of magnetic field bins
d = np.diff(Barray)/2.
Bedges = np.hstack([Barray[0]-d[0],Barray[0:-1]+d,Barray[-1]+d[-1]])
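		# these raw-field edges are immediately recomputed below from ScaledBarray (the demag-corrected fields)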
d = np.diff(ScaledBarray)/2.
Bedges = np.hstack([ScaledBarray[0]-d[0],ScaledBarray[0:-1]+d,ScaledBarray[-1]+d[-1]])
# Mask all the zero elements
Intensity = np.ma.masked_where(np.isnan(Intensity), Intensity)
return Intensity.T, Bedges #transpose
def binAndInterpolate(self, x_new, x_old, y_old):
###BIN
edges = np.array(x_new)
centers = edges[:-1] + (edges[1:]-edges[:-1])/2.
# check that values in x_old fall into the range of edges
# discard any that are outside the range
if x_old[0] > x_old[-1]: # reverse order so that low temperature comes first
x_old = x_old[::-1]
y_old = y_old[::-1]
if x_old[-1] > edges[-1]: # check if x_old goes higher than edges
high_idx = np.nonzero(x_old > edges[-1])[0][0]
x_old = x_old[:high_idx]
y_old = y_old[0:high_idx]
if len(x_old) == 0: # return a masked array if no data falls in range
y_new = np.zeros(np.size(x_new)-1)
return np.ma.masked_where(y_new==0 , y_new)
if x_old[0] < edges[0]:
low_idx = np.nonzero(x_old <= edges[0])[0][-1]
x_old = x_old[low_idx:]
y_old = y_old[low_idx:]
if len(x_old) == 0:
y_new = np.zeros(np.size(x_new)-1)
return np.ma.masked_where(y_new==0 , y_new)
bin_idx = np.digitize(x_old,edges)
bin_count, b = np.histogram(x_old, edges)
y_new = np.zeros(np.size(x_new)-1)
mask_idx = bin_count < 1.
mask_ct = np.ma.array(bin_count,mask = mask_idx)
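		# accumulate each y_old value into its bin, then average using the masked bin counts below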
for ii, idx in enumerate(bin_idx):
#print ii, idx, x_new[idx-1], x_old[ii]
y_new[idx-1] += y_old[ii]
# mask zeros then divide by bin_counts
y_new = np.ma.masked_where(y_new==0 , y_new)/mask_ct
###INTERPOLATE
# remove masked elements of array
unmaskedindices = np.where(np.logical_not(y_new.mask))[0]
y_unmasked = np.ma.compressed(y_new)
x_unmasked = centers[unmaskedindices]
if len(y_unmasked) ==0:
return np.zeros(len(centers))*np.nan
#print y_unmasked, x_unmasked
#interpolate
y_interp = np.interp(centers, x_unmasked, y_unmasked, left =np.nan, right =np.nan)
return y_interp
def scale(self, factor):
"""scale the heat capacity data by a constant.
Useful for going between per FU and per ion"""
self.HC = self.HC/factor
self.HC_uncertainty = self.HC_uncertainty/factor
self.ShortPulse = self.ShortPulse/factor
for i, lst in enumerate(self.avgSpHc):
for j, val in enumerate(lst):
self.avgSpHc[i][j] = val/factor
def scaleDemagFactor(self, demagFac, magvsfield):
"""Scale the magnetic field values by a demagnetization factor.
magvsfield should be a numpy array giving M as a function of INTERNAL field.
Note that this correction must be done numerically:
H_int = H_0 - mu0*(D)*M(H_int)
We use the bisection method here."""
mu0 = 4*np.pi*1e-7
def H0DM(field):
"""Computes H + mu0*(D)*M(H)"""
return field + mu0*demagFac*np.interp(field, magvsfield[0], magvsfield[1])*10000 #mu_B / F.U.
print("Scaling demagnetization...")
self.ScaledBfield = np.zeros(len(self.Bfield))
self.ScaledavgSpB = np.zeros(len(self.avgSpB))
for i, H0 in enumerate(self.Bfield):
if H0 == 0.0:
self.ScaledBfield[i] = 0.0
continue
# First, obtain two H values that bound the problem.
H1 = H0*1.0
mag1 = H0DM(H0)
if mag1 < H0:
while mag1 < H0:
H1 += 0.1*H0
mag1 = H0DM(H1)
H2 = H1*1.0
H1 = H2 - 0.1*H0
elif mag1 > H0:
while mag1 > H0:
H1 -= 0.1*H0
mag1 = H0DM(H1) #mu_B / F.U.
H2 = H1 + 0.1*H0
else:
H2 = H0
#print H0DM(H1)-H0, H0DM(H2)-H0
# Next, use the bisection method to determine the value
Hint = H1
Hintold = H0*1.0
while np.abs((Hint - Hintold)/H0) > 0.00001:
Hintold = Hint*1.0
Hint = 0.5*(H1+H2)
mag1 = H0DM(H1)
mag2 = H0DM(H2)
magbi = H0DM(Hint)
if magbi < H0:
H1 = Hint
else:
H2 = Hint
self.ScaledBfield[i] = Hint
# Do the same thing, but for the short pulse field values
for i, H0 in enumerate(self.avgSpB):
if H0 == 0.0:
				self.ScaledavgSpB[i] = 0.0
continue
# First, obtain two H values that bound the problem.
H1 = H0*1.0
mag1 = H0DM(H0)
if mag1 < H0:
while mag1 < H0:
H1 += 0.1*H0
mag1 = H0DM(H1)
H2 = H1*1.0
H1 = H2 - 0.1*H0
elif mag1 > H0:
while mag1 > H0:
H1 -= 0.1*H0
mag1 = H0DM(H1) #mu_B / F.U.
H2 = H1 + 0.1*H0
else:
H2 = H0
#print H0DM(H1)-H0, H0DM(H2)-H0
# Next, use the bisection method to determine the value
Hint = H1
Hintold = H0*1.0
while np.abs((Hint - Hintold)/H0) > 0.00001:
Hintold = Hint*1.0
Hint = 0.5*(H1+H2)
mag1 = H0DM(H1)
mag2 = H0DM(H2)
magbi = H0DM(Hint)
if magbi < H0:
H1 = Hint
else:
H2 = Hint
self.ScaledavgSpB[i] = Hint
try:
# Do the same thing, but for the combined traces (if they exist)
for i, H0 in enumerate(self.CombB):
if H0 == 0.0:
self.ScaledCombB[i] = 0.0
continue
# First, obtain two H values that bound the problem.
H1 = H0*1.0
mag1 = H0DM(H0)
if mag1 < H0:
while mag1 < H0:
H1 += 0.1*H0
mag1 = H0DM(H1)
H2 = H1*1.0
H1 = H2 - 0.1*H0
elif mag1 > H0:
while mag1 > H0:
H1 -= 0.1*H0
mag1 = H0DM(H1) #mu_B / F.U.
H2 = H1 + 0.1*H0
else:
H2 = H0
#print H0DM(H1)-H0, H0DM(H2)-H0
# Next, use the bisection method to determine the value
Hint = H1
Hintold = H0*1.0
while np.abs((Hint - Hintold)/H0) > 0.00001:
Hintold = Hint*1.0
Hint = 0.5*(H1+H2)
mag1 = H0DM(H1)
mag2 = H0DM(H2)
magbi = H0DM(Hint)
if magbi < H0:
H1 = Hint
else:
H2 = Hint
self.ScaledCombB[i] = Hint
except AttributeError:
pass
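	# Possible refactor (sketch only, not called anywhere above): the three
	# bracket-and-bisect loops in scaleDemagFactor share the same logic and
	# could be collapsed into a helper like this one.
	def _solveHint(self, H0, H0DM, tol=1e-5):
		"""Sketch: solve H0DM(Hint) == H0 for Hint by bracketing and bisection."""
		H1 = H0*1.0
		if H0DM(H1) < H0:
			# step upward until the bracket crosses H0
			while H0DM(H1) < H0:
				H1 += 0.1*H0
			H2, H1 = H1, H1 - 0.1*H0
		elif H0DM(H1) > H0:
			# step downward until the bracket crosses H0
			while H0DM(H1) > H0:
				H1 -= 0.1*H0
			H2 = H1 + 0.1*H0
		else:
			return H0
		# bisect between the brackets H1 and H2
		Hint, Hintold = H1, H0*1.0
		while np.abs((Hint - Hintold)/H0) > tol:
			Hintold = Hint
			Hint = 0.5*(H1 + H2)
			if H0DM(Hint) < H0:
				H1 = Hint
			else:
				H2 = Hint
		return Hint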
def savetrace(self, index, outfile):
heatarray = np.vstack((self.T[index][:,0],self.HC[index][:,0])).T
np.savetxt(outfile+'_heating.txt',heatarray, fmt='%.9f', header = 'Temp(K)\tHC(J/[K mol-ion])',
delimiter=', ')
coolarray = np.vstack((self.T[index][:,1],self.HC[index][:,1])).T
np.savetxt(outfile+'_cooling.txt',coolarray, fmt='%.9f', header = 'Temp(K)\tHC(J/[K mol-ion])',
delimiter=', ')
rawarray = np.vstack((np.hstack((self.rawdata[index][0,:,0],self.rawdata[index][0,:,1])),
np.hstack((self.rawdata[index][1,:,0],self.rawdata[index][1,:,1])) ))
np.savetxt(outfile+'_raw-pulse.txt',rawarray.T, fmt='%.6f', header = 'time(s)\tTemp(K)',
delimiter=', ')
def savetraces(self, outfile, Barray = 'all'):
if (Barray == 'All' or Barray == 'all'):
Barray = np.sort(list(set(self.Bfield)))
else:
Barray = np.array(Barray)
f = open(outfile, 'w')
f.write('# Heat Capacity data from LongHCPulse\n')
f.write('# Temp_heating (K),\t C_heating,\t Temp_cooling (K),\t C_cooling\n')
for jj in range(len(self.Bfield)):
B = self.Bfield[jj]
for b in Barray:
if (B == b) and (self.ShortPulse[jj] == 0):
					f.write('\n# B='+str(B)+' Oe, curve number = '+str(jj)+'\n')
for ii in range(len(self.T[jj][:,0])):
f.write(str(self.T[jj][ii,0]) +',\t'+ str(self.HC[jj][ii,0])+',\t'+
str(self.T[jj][ii,1]) +',\t'+ str(self.HC[jj][ii,1])+'\n')
f.close()
def saveData(self, outfile):
#Combine traces into single line (if not done already)
try:
self.CombHC
except AttributeError:
print(" combining traces...")
self._combineTraces(1)
#so we don't save anything that hasn't been created
members = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")]
membersToBeSaved = ['rawdata','smoothedData','Bfield', 'Blabels', 'CombB', 'CombHC', 'CombT', 'HC',
'ScaledBfield', 'ThermCondWire', 'avgThermCondWire', 'Kw', 'AddendaHC',
'ScaledCombB', 'ScaledavgSpB', 'ShortPulse', 'T', 'Tsamp', 'avgSpB', 'avgSpHc',
'avgSpT', 'molarmass', 'sampmass', 'shortpulseBflag', 'shortpulselabels',
'entropylabels', 'Entropy','labels']
membersToBeSaved = list(set(members).intersection(membersToBeSaved))
		# Make list of data to be saved
		dataList = [getattr(self, m) for m in membersToBeSaved]
dataToSave = dict(zip(membersToBeSaved, dataList))
# Edit outfile so it has the extension .pickle
try:
periodindex = outfile.index('.')
outfile = outfile[:periodindex] + '.pickle'
except ValueError:
outfile = outfile + '.pickle'
		# Save data with pickle (pickle requires a binary-mode file handle on Python 3)
		with open(outfile, 'wb') as f:
			pickle.dump(dataToSave, f)
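# ----------------------------------------------------------------------
# Usage sketch (illustrative only). The class name and constructor are
# defined earlier in this file and are not repeated here, so every call
# below is an assumption and is left commented out.
if __name__ == "__main__":
	# data = <HeatCapacityClass>(...)                 # hypothetical constructor call
	# fig, ax = plt.subplots()
	# data.lineplotCombine(ax, Barray='all', smooth=1)
	# ax.legend(handles=data.labels + data.shortpulselabels, loc='best')
	# data.saveData('combined_traces')                # written to combined_traces.pickle
	# plt.show()
	pass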
| gpl-3.0 |
mphe/dotfiles | ipython/profile_default/ipython_config.py | 1 | 23356 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = u''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = u''
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
# The IPython profile to use.
# c.BaseIPythonApplication.profile = u'default'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 2.7.12 (default, Jun 28 2016, 08:31:05) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.0.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.InteractiveShell.cache_size = 1000
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
# Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# c.InteractiveShell.colors = 'Neutral'
#
# c.InteractiveShell.debug = False
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
# (Provisional API) enables html representation in mime bundles sent to pagers.
# c.InteractiveShell.enable_html_pager = False
# Total length of command history
# c.InteractiveShell.history_length = 10000
# The number of saved history entries to be loaded into the history buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#
# c.InteractiveShell.ipython_dir = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
#
# c.InteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.separate_in = '\n'
#
# c.InteractiveShell.separate_out = ''
#
# c.InteractiveShell.separate_out2 = ''
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
# Enables rich html representation of docstrings. (This requires the docrepr
# module).
# c.InteractiveShell.sphinxify_docstring = False
#
# c.InteractiveShell.wildcards_case_sensitive = True
#
# c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
c.TerminalInteractiveShell.display_completions = 'readlinelike'
# DEPRECATED
# c.TerminalInteractiveShell.display_completions_in_columns = None
# Shortcut style to use at the prompt. 'vi' or 'emacs'.
c.TerminalInteractiveShell.editing_mode = 'vi'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'vim'
# Highlight matching brackets.
# c.TerminalInteractiveShell.highlight_matching_brackets = True
# The name of a Pygments style to use for syntax highlighting: manni, igor,
# lovelace, xcode, vim, autumn, vs, rrt, native, perldoc, borland, tango, emacs,
# friendly, monokai, paraiso-dark, colorful, murphy, bw, pastie, algol_nu,
# paraiso-light, trac, default, algol, fruity
# c.TerminalInteractiveShell.highlighting_style = 'legacy'
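# For example, to switch to one of the styles listed above you could uncomment:
# c.TerminalInteractiveShell.highlighting_style = 'monokai'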
# Override highlighting format for specific tokens
# c.TerminalInteractiveShell.highlighting_style_overrides = {}
# Enable mouse support in the prompt
# c.TerminalInteractiveShell.mouse_support = False
# Class used to generate Prompt token for prompt_toolkit
# c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
# Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
# c.TerminalInteractiveShell.simple_prompt = False
# Number of line at the bottom of the screen to reserve for the completion menu
# c.TerminalInteractiveShell.space_for_menu = 6
# Automatically set the terminal title
# c.TerminalInteractiveShell.term_title = True
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends, but not the backticks), to avoid creating a history file.
# c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.deferred_printers = {}
#
# c.BaseFormatter.enabled = True
#
# c.BaseFormatter.singleton_printers = {}
#
# c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
#
# c.PlainTextFormatter.float_precision = ''
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion. PENDING DEPRECATION: this is now mostly taken care
# of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit |
anewmark/galaxy_dark_matter | not currently in use/lumprof.py | 1 | 4175 | print('This program should make plots of aperture size vs. aperture magnitude')
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.text as txt
from astropy.io import fits
import astropy.table as table
import matplotlib.patches as mpatches
from sympy import *
import os, sys
datatab = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs.fits')
#datatab=fits.open(indir+ 'LOWZ_HSCGAMA15_apmgs.fits') <-- probably better to use table
#hdulist=fits.open(indir+ 'LOWZ_HSCGAMA15_apmgs.fits')
#hdulist.info()
#print(datatab['Z'])
#datatab['imag_aperture00'] gives the first aperture magnitude in band i
#
aperture=[3.0*0.168,4.5*0.168,6.0*0.168,9.0*0.168,12.0*0.168,17.0*0.168,25.0*0.168,35.0*0.168,50.0*0.168,70.0*0.168]
pi=math.pi
Naps=len(aperture)
N=len(datatab)
band= ['r','i','g', 'z', 'y']
band=band[0]
no_flag=0
no_sat=0
no_edge=1
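# the switches above pick which quality cut (if any) is applied to the catalog below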
if no_flag:
flag=('No Flag', 'no_flag')
num=len(datatab)
Nnew=num
num=str(num)
if no_sat:
datatab_flag = datatab[(datatab[band+'flags_pixel_saturated_center']==False) & (datatab[band+'mag_aperture00']<50)& (datatab[band+'mag_aperture01']<50)& (datatab[band+'mag_aperture02']<50) & (datatab[band+'mag_aperture03']<50) & (datatab[band+'mag_aperture04']<50) & (datatab[band+'mag_aperture05']<50) & (datatab[band+'mag_aperture06']<50) & (datatab[band+'mag_aperture07']<50) & (datatab[band+'mag_aperture08']<50) & (datatab[band+'mag_aperture09']<50)]
datatab=datatab_flag
num=len(datatab)
Nnew=num
num=str(num)
flag=('No Saturated Centers', 'no_satcen')
if no_edge:
datatab_flag = datatab[(datatab[band+'flags_pixel_edge']==False) & (datatab[band+'mag_aperture00']<50)& (datatab[band+'mag_aperture01']<50)& (datatab[band+'mag_aperture02']<50) & (datatab[band+'mag_aperture03']<50) & (datatab[band+'mag_aperture04']<50) & (datatab[band+'mag_aperture05']<50) & (datatab[band+'mag_aperture06']<50) & (datatab[band+'mag_aperture07']<50) & (datatab[band+'mag_aperture08']<50) & (datatab[band+'mag_aperture09']<50)]
#this should hopefully get rid of random bad pixels
datatab=datatab_flag
num=len(datatab)
Nnew=num
num=str(num)
flag=('No Edge Galaxies', 'no_edge')
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/single_plot/'
if not os.path.exists(outdir):
os.mkdir(outdir)
for i in range(0,Nnew): #this goes through every galaxy
objID=datatab['object_id']
name=objID[i]
name=str(name)
print(Nnew, N)
print(name)
magap=[]
SB=[]
magerr=[]
SBerr=[]
	for j in range(0,Naps): #this goes through every aperture, collecting magnitudes
j=str(j)
mag=datatab[band+'mag_aperture0'+j][i]
merr=datatab[band+'mag_aperture0'+j+'_err'][i]
magap.append(mag)
magerr.append(merr)
	for j in range(0,Naps): #this goes through every aperture, computing surface brightness (SB)
js=str(j)
mag=datatab[band+'mag_aperture0'+js][i]
merr=datatab[band+'mag_aperture0'+js+'_err'][i]
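		# convert the aperture magnitude to a surface-brightness-like quantity by adding 2.5*log10(4*pi*r^2)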
sb=mag+2.5*math.log10(4*pi*aperture[j]**2)
		sberr=merr  # SB differs from the magnitude only by a constant offset, so its uncertainty equals merr
#F=math.pow(10, -mag/2.5)
#sb=-2.5*math.log10(F/(4*pi*aperture[j]**2))
#Ferr=math.pow(10, -merr/2.5)
#sberr=-2.5*math.log10(Ferr/(4*pi*aperture[j]**2))
SB.append(sb)
SBerr.append(sberr)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.scatter(aperture, magap, c='k', marker='^')
ax0.errorbar(aperture, magap, yerr=magerr, marker='', mfc='red', mec='green', ms=5, mew=1)
ax0.invert_yaxis()
ax0.set_xlabel('Aperture Radius (arcseconds)', fontsize=9)
ax0.set_ylabel('Aperture Magnitude', fontsize=9)
ax0.set_title("Luminosity Profiles vs. Aperture Radius in "+band+" ("+name+')', fontsize=11)
ax0.set_xlim(xmin=0, xmax=max(aperture))
plt.plot(0,0,label=flag[0]+'('+num+')', marker='', c='k')
plt.legend(loc=4,prop={'size':4})
ax1.scatter(aperture, SB, c='k', marker='^')
ax1.errorbar(aperture, SB, yerr=SBerr, marker='', mfc='red', mec='green', ms=5, mew=1)
ax1.invert_yaxis()
ax1.set_xlabel('Aperture Radius (arcseconds)', fontsize=9)
ax1.set_ylabel('Surface Brightness(mag/arcsec^2)', fontsize=9)
ax1.set_title("Surface Brightness vs. Aperture Radius in "+band+" ("+name+')', fontsize=11)
#plt.show()
fig.savefig(outdir+flag[1]+'_'+band+name+'_lumprof.pdf')
#break | mit |
dagss/numpy_svn | numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
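# Illustrative sketch (not from the original module; the helper name and data
# are made up, and NumPy is assumed to be importable as ``np``).  It shows the
# advice from the Notes above: for badly centered sample points, fitting
# against ``x - x.mean()`` tends to be much better conditioned.
def _example_polyfit_centering():
    import numpy as np
    x = np.linspace(1000.0, 1001.0, 50)      # badly centered sample points
    y = np.sin(x - 1000.0)
    p_raw = np.polyfit(x, y, 7)              # may emit a RankWarning
    p_ctr = np.polyfit(x - x.mean(), y, 7)   # centered fit
    err_raw = abs(np.polyval(p_raw, x) - y).max()
    err_ctr = abs(np.polyval(p_ctr, x - x.mean()) - y).max()
    return err_raw, err_ctr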
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
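# Illustrative sketch (helper name made up; assumes NumPy as ``np``): the loop
# in polyval above is Horner's scheme.  Written out for the concrete cubic
# 3*x**3 - 2*x**2 + 5, the same recurrence reads:
def _example_horner(x):
    import numpy as np
    coeffs = [3.0, -2.0, 0.0, 5.0]           # highest power first
    acc = 0.0
    for c in coeffs:                         # ((3*x - 2)*x + 0)*x + 5
        acc = acc * x + c
    assert np.allclose(acc, np.polyval(coeffs, x))
    return acc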
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
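# Illustrative sketch (helper name made up; assumes NumPy as ``np``): polyadd
# aligns the two coefficient sequences by left-padding the shorter one with
# zeros, so [1, 2] is treated as [0, 1, 2] before the element-wise sum.
def _example_polyadd_padding():
    import numpy as np
    padded = np.concatenate((np.zeros(1, dtype=int), [1, 2]))   # [0, 1, 2]
    assert np.array_equal(np.polyadd([1, 2], [9, 5, 4]), padded + [9, 5, 4])
    return padded + [9, 5, 4]                # array([9, 6, 6])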
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
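# Illustrative sketch (helper name made up; assumes NumPy as ``np``): as the
# body above shows, polynomial multiplication is just a convolution of the two
# coefficient sequences.
def _example_polymul_is_convolution():
    import numpy as np
    a1, a2 = [1, 2, 3], [9, 5, 1]
    assert np.array_equal(np.polymul(a1, a2), np.convolve(a1, a2))
    return np.convolve(a1, a2)               # array([ 9, 23, 38, 17,  3])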
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
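# Illustrative sketch (helper name made up; assumes NumPy as ``np``): the
# quotient and remainder returned by polydiv satisfy u == q*v + r, which can
# be checked with polymul and polyadd.
def _example_polydiv_roundtrip():
    import numpy as np
    u = np.array([3.0, 5.0, 2.0])            # 3x**2 + 5x + 2
    v = np.array([2.0, 1.0])                 # 2x + 1
    q, r = np.polydiv(u, v)
    assert np.allclose(np.polyadd(np.polymul(q, v), r), u)
    return q, r                              # (array([ 1.5 ,  1.75]), array([ 0.25]))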
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
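# Illustrative sketch (helper name made up; assumes NumPy as ``np``):
# differentiating the antiderivative returned by ``integ`` recovers the
# original polynomial, since ``deriv`` drops the integration constant ``k``.
def _example_integ_deriv_roundtrip():
    import numpy as np
    p = np.poly1d([1.0, 2.0, 3.0])           # x**2 + 2x + 3
    assert np.allclose(p.integ(k=5).deriv().coeffs, p.coeffs)
    return p.integ(k=5).deriv()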
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
| bsd-3-clause |
laosiaudi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 16 | 5175 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(iris.data), [-1, 4]), num_epochs=num_epochs)
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def logistic_model_fn(features, labels, unused_mode):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, labels, unused_mode, params):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data))
predictions_proba = list(est.predict_proba(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
iemejia/beam | sdks/python/apache_beam/dataframe/partitionings.py | 4 | 5797 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Any
from typing import Iterable
from typing import Tuple
from typing import TypeVar
import numpy as np
import pandas as pd
Frame = TypeVar('Frame', bound=pd.core.generic.NDFrame)
class Partitioning(object):
"""A class representing a (consistent) partitioning of dataframe objects.
"""
def __repr__(self):
return self.__class__.__name__
def is_subpartitioning_of(self, other):
# type: (Partitioning) -> bool
"""Returns whether self is a sub-partition of other.
    Specifically, returns whether something partitioned by self is necessarily
also partitioned by other.
"""
raise NotImplementedError
def __lt__(self, other):
return self != other and self <= other
def __le__(self, other):
return not self.is_subpartitioning_of(other)
def partition_fn(self, df, num_partitions):
# type: (Frame, int) -> Iterable[Tuple[Any, Frame]]
"""A callable that actually performs the partitioning of a Frame df.
This will be invoked via a FlatMap in conjunction with a GroupKey to
achieve the desired partitioning.
"""
raise NotImplementedError
def test_partition_fn(self, df):
return self.partition_fn(df, 5)
class Index(Partitioning):
"""A partitioning by index (either fully or partially).
If the set of "levels" of the index to consider is not specified, the entire
index is used.
These form a partial order, given by
Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()
The ordering is implemented via the is_subpartitioning_of method, where the
examples on the right are subpartitionings of the examples on the left above.
"""
def __init__(self, levels=None):
self._levels = levels
def __repr__(self):
if self._levels:
return 'Index%s' % self._levels
else:
return 'Index'
def __eq__(self, other):
return type(self) == type(other) and self._levels == other._levels
def __hash__(self):
if self._levels:
return hash(tuple(sorted(self._levels)))
else:
return hash(type(self))
def is_subpartitioning_of(self, other):
if isinstance(other, Singleton):
return True
elif isinstance(other, Index):
if self._levels is None:
return True
elif other._levels is None:
return False
else:
return all(level in self._levels for level in other._levels)
elif isinstance(other, Arbitrary):
return False
else:
raise ValueError(f"Encountered unknown type {other!r}")
def _hash_index(self, df):
if self._levels is None:
levels = list(range(df.index.nlevels))
else:
levels = self._levels
return sum(
pd.util.hash_array(np.asarray(df.index.get_level_values(level)))
for level in levels)
def partition_fn(self, df, num_partitions):
hashes = self._hash_index(df)
for key in range(num_partitions):
yield key, df[hashes % num_partitions == key]
def check(self, dfs):
# Drop empty DataFrames
dfs = [df for df in dfs if len(df)]
if not len(dfs):
return True
def apply_consistent_order(dfs):
# Apply consistent order between dataframes by using sum of the index's
# hash.
# Apply consistent order within dataframe with sort_index()
# Also drops any empty dataframes.
return sorted((df.sort_index() for df in dfs if len(df)),
key=lambda df: sum(self._hash_index(df)))
dfs = apply_consistent_order(dfs)
repartitioned_dfs = apply_consistent_order(
df for _, df in self.test_partition_fn(pd.concat(dfs)))
# Assert that each index is identical
for df, repartitioned_df in zip(dfs, repartitioned_dfs):
if not df.index.equals(repartitioned_df.index):
return False
return True
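# Minimal sketch (the helper name and sample frame are made up; it relies only
# on the classes above and this module's pandas import): partition_fn hashes
# the requested index levels and yields one (key, sub-frame) pair per
# partition, so rows that share an index value always land together.
def _example_index_partitioning():
  df = pd.DataFrame({'value': range(6)},
                    index=pd.Index(['a', 'b', 'c', 'a', 'b', 'c'], name='key'))
  parts = dict(Index().partition_fn(df, num_partitions=3))
  # Every input row ends up in exactly one of the partitions.
  assert sum(len(part) for part in parts.values()) == len(df)
  return parts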
class Singleton(Partitioning):
"""A partitioning of all the data into a single partition.
"""
def __init__(self, reason=None):
self._reason = reason
@property
def reason(self):
return self._reason
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def is_subpartitioning_of(self, other):
return isinstance(other, Singleton)
def partition_fn(self, df, num_partitions):
yield None, df
def check(self, dfs):
return len(dfs) <= 1
class Arbitrary(Partitioning):
"""A partitioning imposing no constraints on the actual partitioning.
"""
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def is_subpartitioning_of(self, other):
return True
def test_partition_fn(self, df):
num_partitions = 10
def shuffled(seq):
seq = list(seq)
random.shuffle(seq)
return seq
# pylint: disable=range-builtin-not-iterating
part = pd.Series(shuffled(range(len(df))), index=df.index) % num_partitions
for k in range(num_partitions):
yield k, df[part == k]
def check(self, dfs):
return True
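# Minimal sketch of the partial order described in the Index docstring above
# (helper name made up; only the classes defined in this module are used):
# everything is a subpartitioning of Singleton, and Arbitrary is a
# subpartitioning of everything.
def _example_partial_order():
  assert Index(['a']).is_subpartitioning_of(Singleton())
  assert Index().is_subpartitioning_of(Index(['a']))
  assert not Index(['a']).is_subpartitioning_of(Index(['a', 'b']))
  assert Arbitrary().is_subpartitioning_of(Index())
  assert not Singleton().is_subpartitioning_of(Arbitrary())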
| apache-2.0 |
Nyker510/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
krafczyk/spack | var/spack/repos/builtin/packages/julia/package.py | 2 | 9890 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import sys
class Julia(Package):
"""The Julia Language: A fresh approach to technical computing"""
homepage = "http://julialang.org"
url = "https://github.com/JuliaLang/julia/releases/download/v0.4.3/julia-0.4.3-full.tar.gz"
git = "https://github.com/JuliaLang/julia.git"
version('master', branch='master')
version('0.6.2', '255d80bc8d56d5f059fe18f0798e32f6')
version('release-0.5', branch='release-0.5')
version('0.5.2', '8c3fff150a6f96cf0536fb3b4eaa5cbb')
version('0.5.1', 'bce119b98f274e0f07ce01498c463ad5')
version('0.5.0', 'b61385671ba74767ab452363c43131fb')
version('release-0.4', branch='release-0.4')
version('0.4.7', '75a7a7dd882b7840829d8f165e9b9078')
version('0.4.6', 'd88db18c579049c23ab8ef427ccedf5d')
version('0.4.5', '69141ff5aa6cee7c0ec8c85a34aa49a6')
version('0.4.3', '8a4a59fd335b05090dd1ebefbbe5aaac')
# TODO: Split these out into jl-hdf5, jl-mpi packages etc.
variant("cxx", default=False, description="Prepare for Julia Cxx package")
variant("hdf5", default=False, description="Install Julia HDF5 package")
variant("mpi", default=True, description="Install Julia MPI package")
variant("plot", default=False,
description="Install Julia plotting packages")
variant("python", default=False,
description="Install Julia Python package")
variant("simd", default=False, description="Install Julia SIMD package")
patch('gc.patch', when='@0.4:0.4.5')
patch('openblas.patch', when='@0.4:0.4.5')
variant('binutils', default=sys.platform != 'darwin',
description="Build via binutils")
# Build-time dependencies:
# depends_on("awk")
depends_on("m4", type="build")
# depends_on("pkgconfig")
# Combined build-time and run-time dependencies:
# (Yes, these are run-time dependencies used by Julia's package manager.)
depends_on("binutils", when='+binutils')
depends_on("cmake @2.8:")
depends_on("curl")
depends_on("git", when='@:0.4')
depends_on("git", when='@release-0.4')
depends_on("openssl")
depends_on("[email protected]:2.8")
# Run-time dependencies:
# depends_on("arpack")
# depends_on("fftw +float")
# depends_on("gmp")
# depends_on("libgit")
# depends_on("mpfr")
# depends_on("openblas")
# depends_on("pcre2")
# ARPACK: Requires BLAS and LAPACK; needs to use the same version
# as Julia.
# BLAS and LAPACK: Julia prefers 64-bit versions on 64-bit
# systems. OpenBLAS has an option for this; make it available as
# variant.
# FFTW: Something doesn't work when using a pre-installed FFTW
# library; need to investigate.
# GMP, MPFR: Something doesn't work when using a pre-installed
# FFTW library; need to investigate.
# LLVM: Julia works only with specific versions, and might require
# patches. Thus we let Julia install its own LLVM.
# Other possible dependencies:
# USE_SYSTEM_OPENLIBM=0
# USE_SYSTEM_OPENSPECFUN=0
# USE_SYSTEM_DSFMT=0
# USE_SYSTEM_SUITESPARSE=0
# USE_SYSTEM_UTF8PROC=0
# USE_SYSTEM_LIBGIT2=0
# Run-time dependencies for Julia packages:
depends_on("hdf5", when="+hdf5", type="run")
depends_on("mpi", when="+mpi", type="run")
depends_on("py-matplotlib", when="+plot", type="run")
def install(self, spec, prefix):
# Julia needs git tags
if os.path.isfile(".git/shallow"):
git = which("git")
git("fetch", "--unshallow")
# Explicitly setting CC, CXX, or FC breaks building libuv, one
# of Julia's dependencies. This might be a Darwin-specific
# problem. Given how Spack sets up compilers, Julia should
# still use Spack's compilers, even if we don't specify them
# explicitly.
options = [
# "CC=cc",
# "CXX=c++",
# "FC=fc",
# "USE_SYSTEM_ARPACK=1",
"override USE_SYSTEM_CURL=1",
# "USE_SYSTEM_FFTW=1",
# "USE_SYSTEM_GMP=1",
# "USE_SYSTEM_MPFR=1",
# "USE_SYSTEM_PCRE=1",
"prefix=%s" % prefix]
if "+cxx" in spec:
if "@master" not in spec:
raise InstallError(
"Variant +cxx requires the @master version of Julia")
options += [
"BUILD_LLVM_CLANG=1",
"LLVM_ASSERTIONS=1",
"USE_LLVM_SHLIB=1"]
with open('Make.user', 'w') as f:
f.write('\n'.join(options) + '\n')
make()
make("install")
# Julia's package manager needs a certificate
cacert_dir = join_path(prefix, "etc", "curl")
mkdirp(cacert_dir)
cacert_file = join_path(cacert_dir, "cacert.pem")
curl = which("curl")
curl("--create-dirs",
"--output", cacert_file,
"https://curl.haxx.se/ca/cacert.pem")
# Put Julia's compiler cache into a private directory
cachedir = join_path(prefix, "var", "julia", "cache")
mkdirp(cachedir)
# Store Julia packages in a private directory
pkgdir = join_path(prefix, "var", "julia", "pkg")
mkdirp(pkgdir)
# Configure Julia
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
if "@master" in spec or "@release-0.5" in spec or "@0.5:" in spec:
# This is required for versions @0.5:
juliarc.write(
'# Point package manager to working certificates\n')
juliarc.write('LibGit2.set_ssl_cert_locations("%s")\n' %
cacert_file)
juliarc.write('\n')
juliarc.write('# Put compiler cache into a private directory\n')
juliarc.write('empty!(Base.LOAD_CACHE_PATH)\n')
juliarc.write('unshift!(Base.LOAD_CACHE_PATH, "%s")\n' % cachedir)
juliarc.write('\n')
juliarc.write('# Put Julia packages into a private directory\n')
juliarc.write('ENV["JULIA_PKGDIR"] = "%s"\n' % pkgdir)
juliarc.write('\n')
# Install some commonly used packages
julia = spec['julia'].command
julia("-e", 'Pkg.init(); Pkg.update()')
# Install HDF5
if "+hdf5" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# HDF5\n')
juliarc.write('push!(Libdl.DL_LOAD_PATH, "%s")\n' %
spec["hdf5"].prefix.lib)
juliarc.write('\n')
julia("-e", 'Pkg.add("HDF5"); using HDF5')
julia("-e", 'Pkg.add("JLD"); using JLD')
# Install MPI
if "+mpi" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# MPI\n')
juliarc.write('ENV["JULIA_MPI_C_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpicc"))
juliarc.write('ENV["JULIA_MPI_Fortran_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpifort"))
juliarc.write('\n')
julia("-e", 'Pkg.add("MPI"); using MPI')
# Install Python
if "+python" in spec or "+plot" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# Python\n')
juliarc.write('ENV["PYTHON"] = "%s"\n' % spec["python"].home)
juliarc.write('\n')
# Python's OpenSSL package installer complains:
# Error: PREFIX too long: 166 characters, but only 128 allowed
# Error: post-link failed for: openssl-1.0.2g-0
julia("-e", 'Pkg.add("PyCall"); using PyCall')
if "+plot" in spec:
julia("-e", 'Pkg.add("PyPlot"); using PyPlot')
julia("-e", 'Pkg.add("Colors"); using Colors')
# These require maybe gtk and image-magick
julia("-e", 'Pkg.add("Plots"); using Plots')
julia("-e", 'Pkg.add("PlotRecipes"); using PlotRecipes')
julia("-e", 'Pkg.add("UnicodePlots"); using UnicodePlots')
julia("-e", """\
using Plots
using UnicodePlots
unicodeplots()
plot(x->sin(x)*cos(x), linspace(0, 2pi))
""")
# Install SIMD
if "+simd" in spec:
julia("-e", 'Pkg.add("SIMD"); using SIMD')
julia("-e", 'Pkg.status()')
| lgpl-2.1 |
JacekPierzchlewski/RxCS | examples/signals/vect_sparse_ex0.py | 1 | 2944 | """
This script is an example of how to use the random sparse vector
generator module. |br|
In this example 5 random sparse vectors are generated. |br|
After the generation, the generated sparse vectors are plotted. |br|
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
1.0 | 22-JAN-2015 : * Version 1.0 released. |br|
2.0 | 15-JUL-2015 : * Version 2.0 released. |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
import matplotlib.pyplot as plt
def _vect_sparse_ex0():
# SETTINGS:
iN = 20 # Size of the vectors
iS = 0.2 # Sparsity parameter (0.2 * 20 = 4 non-zero elements)
iNVects = 5 # The number of vectors
# Things on the table:
sparseVector = rxcs.sig.sparseVector() # Sparse vectors generator
# Configure the generator...
sparseVector.iVectSiz = iN # Size of the vectors
sparseVector.iS = iS # Sparsity parameter
sparseVector.iNVect = iNVects # The number of vectors
sparseVector.run() # ... and run it!
mVecs = sparseVector.mVects # take the generated vectors
# -----------------------------------------------------------------
# Plot the sparse vectors
hFig1 = plt.figure(1)
    for iVect in range(iNVects):
        hSubPlot1 = hFig1.add_subplot(iNVects, 1, iVect + 1)
        hSubPlot1.grid(True)
        hSubPlot1.set_title('Vector #%d' % (iVect + 1))
        hSubPlot1.stem(np.arange(iN), mVecs[iVect, :], linefmt='b-', markerfmt='bo', basefmt='r-')
        hSubPlot1.set_xlim(-1, iN + 1)
        hSubPlot1.set_ylim(-0.1, 1.1)
# -----------------------------------------------------------------
plt.show(block=True)
# =====================================================================
# Trigger when start as a script
# =====================================================================
if __name__ == '__main__':
_vect_sparse_ex0()
| bsd-2-clause |
walterreade/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
mojosaurus/netflix-project | com/demimojo/netflix/matrix/preprocessing.py | 1 | 4272 | __author__ = 'mojosaurus'
import sys
from com.demimojo.netflix.loader import Constants
from sklearn.preprocessing import normalize
import csv
from com import logger
import glob
from scipy.sparse import lil_matrix
import numpy as np
class PreProcess():
def __init__(self):
logger.info("Starting pre-procesing")
def process(self):
self.__initDataStructures()
self.__constructUberMatrix()
self.__normalizeMatrix()
def getNormalizedMatrix(self):
return self.normalizedMatrix
def getRating(self, row, col):
return self.uberMatrix[row, col]
def getNormalizedRating(self, row, col):
return self.normalizedMatrix[row, col]
def getAverageUserRating(self, userId):
return self.userAvg[userId]
def getAverageMovieRating(self, movieId):
return self.movieAvg[movieId]
def getNumUserRating(self, userId):
return self.userCnt[userId]
def getNumMovieRating(self, movieId):
logger.info("Getting rating count got movie %d " % movieId)
return self.movieCnt[movieId]
def __initDataStructures(self):
logger.info("Initialising the sparse matrices - uber and normalized")
self.uberMatrix = lil_matrix((2649430, 17770), dtype=np.float64)
self.normalizedMatrix = lil_matrix((2649430, 17770), dtype=np.float64)
logger.info("Initialising associated arrays")
self.userAvg = [0 for i in range(0, 2649430)]
self.userCnt = [0 for i in range(0, 2649430)]
self.movieAvg = [0 for i in range(0, 17771)]
self.movieCnt = [0 for i in range(0, 17771)]
logger.info("Arrays initialized")
def __constructUberMatrix(self):
logger.info("Constructing the uber matrix")
files = sorted(glob.glob(Constants.DATA_DIR + "training_set/*.txt"))
for file in files:
with open(file, 'r') as f:
elements = [line.split(',', 2) for line in f]
for element in elements:
if len(element) == 1:
movieId = int(element[0].replace(':', '').rstrip())
logger.info("Starting movie %d " % movieId)
else:
userId = int(element[0])
rating = float(element[1])
self.uberMatrix[userId, movieId] = float(rating)
# Now, calculate the average for the movie and the user.
self.userAvg[userId] = (self.userAvg[userId] + rating) / 2 if self.userAvg[userId] != 0 else rating
self.userCnt[userId] += 1
self.movieAvg[movieId] = (self.movieAvg[movieId] + rating) / 2 if self.movieAvg[movieId] != 0 else rating
self.movieCnt[movieId] += 1
logger.info("Movie id %d ended " % movieId)
f.close()
logger.info("Uber matrix constructed")
def __writerFiles(self):
logger.info("Writing file user_average.csv")
with open(Constants.DATA_DIR+'user_average.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=',')
for i in range(1, len(self.userAvg)):
writer.writerow([i, self.userCnt[i], self.userAvg[i]])
fh.close()
logger.info("Writing file movie_average.csv")
with open(Constants.DATA_DIR+'movie_average.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=',')
for i in range(1, len(self.movieAvg)):
writer.writerow([i, self.movieCnt[i], self.movieAvg[i]])
fh.close()
def __normalizeMatrix(self):
# Now, onto normalising the matrix.
logger.info("Intializing the normalized matrix")
rows, cols = self.uberMatrix.nonzero()
for i in range(0, len(rows)):
row = rows[i]
col = cols[i]
val = self.uberMatrix[row, col]
normalizedValue = self.userAvg[row] - val
self.normalizedMatrix[row, col] = normalizedValue
# Delete the object to free up memory. We will need it soon.
logger.info("Matrix normalized")
#
# process = PreProcess()
# process.process()
#
# print process.getNormalizedRating(2643247, 30) | apache-2.0 |
cybernet14/scikit-learn | sklearn/linear_model/least_angle.py | 61 | 54324 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
        ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
        regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is Fortran-contiguous
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in numerical error greater than the remaining
            # correlation with the regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
            # with a huge number of features, this takes ~50% of the time;
            # it could probably be avoided by updating corr_eq_dir using an
            # orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
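        # Step length selection: g1 (and g2 when signs are not restricted to
        # be positive) is the smallest positive step along the equiangular
        # direction at which an inactive variable's correlation catches up
        # with the active set; C / AA is the full least-squares step.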
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
            # handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # keep `indices` in sync with the column swap above
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
        Xy : array-like, shape (n_features,) or (n_features, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
    ----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X). If ``'auto'``, the Gram
        matrix is precomputed from the given X when there are more samples
        than features.
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
    -------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
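    Examples
    --------
    A small illustrative sketch on synthetic data (the data and the number
    of folds below are arbitrary; outputs are not shown):

    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 5)
    >>> y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(100)
    >>> model = linear_model.LarsCV(cv=5).fit(X, y)  # doctest: +SKIP

    ``model.alpha_`` then holds the regularization level selected by
    cross-validation.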
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
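    Examples
    --------
    A small illustrative sketch on synthetic, sparse data (the data and the
    number of folds below are arbitrary; outputs are not shown):

    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 20)
    >>> y = 3 * X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(100)
    >>> model = linear_model.LassoLarsCV(cv=5).fit(X, y)  # doctest: +SKIP

    ``model.alpha_`` then holds the alpha minimizing the cross-validated
    mean squared error, and most entries of ``model.coef_`` are expected
    to be zero.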
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    AIC is the Akaike information criterion and BIC is the Bayesian
    information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain the data well while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data.
y : array-like, shape (n_samples,)
            Target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
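        # The criterion below is the information criterion up to an additive
        # constant: n_samples * log(MSE) + K * df, with K = 2 for AIC and
        # K = log(n_samples) for BIC, and df the number of non-zero
        # coefficients at each point of the path.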
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| mit |
deepmind/deepmind-research | geomancer/train.py | 1 | 6748 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run GEOMANCER on products of synthetic manifolds."""
import re
from absl import app
from absl import flags
from absl import logging
import geomancer
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import special_ortho_group
from tqdm import tqdm
SPECIFICATION = flags.DEFINE_list(
name='specification', default=['S^2', 'S^2'], help='List of submanifolds')
NPTS = flags.DEFINE_integer(
name='npts', default=1000, help='Number of data points')
ROTATE = flags.DEFINE_boolean(
name='rotate', default=False, help='Apply random rotation to the data')
PLOT = flags.DEFINE_boolean(
name='plot', default=True, help='Whether to enable plotting')
def make_so_tangent(q):
"""Given an n x n orthonormal matrix, return a basis for its tangent space."""
n = q.shape[0]
assert np.allclose(q.T @ q, np.eye(n), atol=1e-4, rtol=1e-4)
a = np.zeros((n, n))
ii = 0
dq = np.zeros((n, n, n*(n-1)//2))
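  # so(n), the tangent space of SO(n), has dimension n*(n-1)/2: one basis
  # element per index pair (i, j) with i < j, built below as skew-symmetric
  # matrices applied to q.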
for i in range(n):
for j in range(i+1, n):
a[i, j] = 1
a[j, i] = -1
dq[..., ii] = a @ q # tangent vectors are skew-symmetric matrix times Q
a[i, j] = 0
a[j, i] = 0
ii += 1
# reshape and orthonormalize the result
return np.linalg.qr(np.reshape(dq, (n**2, n*(n-1)//2)))[0]
def make_sphere_tangent(x):
_, _, v = np.linalg.svd(x[None, :])
return v[:, 1:]
def make_true_tangents(spec, data):
"""Return a set of orthonormal bases, one for each submanifold."""
for i in range(spec.shape[1]):
assert spec[0, i] == 0 or spec[1, i] == 0
so_dim = sum(dim ** 2 for dim in spec[0])
sphere_dim = sum(dim+1 if dim > 0 else 0 for dim in spec[1])
assert so_dim + sphere_dim == data.shape[0]
ii = 0
tangents = []
for i in range(spec.shape[1]):
if spec[0, i] != 0:
dim = spec[0, i]
tangents.append(make_so_tangent(np.reshape(data[ii:ii+dim**2],
(dim, dim))))
ii += dim ** 2
else:
dim = spec[1, i]
tangents.append(make_sphere_tangent(data[ii:ii+dim+1]))
ii += dim + 1
tangents2 = []
for i in range(len(tangents)):
size1 = sum(x.shape[0] for x in tangents[:i])
size2 = sum(x.shape[0] for x in tangents[i+1:])
tangents2.append(np.concatenate(
(np.zeros((size1, tangents[i].shape[1])),
tangents[i],
np.zeros((size2, tangents[i].shape[1]))), axis=0))
return tangents2
def make_product_manifold(specification, npts):
"""Generate data from a product of manifolds with the given specification."""
data = []
tangents = []
latent_dim = 0
spec_array = np.zeros((2, len(specification)), dtype=np.int32)
for i, spec in enumerate(specification):
so_spec = re.search(r'SO\(([0-9]+)\)', spec) # matches "SO(<numbers>)"
sphere_spec = re.search(r'S\^([0-9]+)', spec) # matches "S^<numbers>"
if sphere_spec is not None:
dim = int(sphere_spec.group(1))
spec_array[1, i] = dim
latent_dim += dim
dat = np.random.randn(npts, dim+1)
dat /= np.tile(np.sqrt(np.sum(dat**2, axis=1)[..., None]),
[1, dim+1])
elif so_spec is not None:
dim = int(so_spec.group(1))
spec_array[0, i] = dim
latent_dim += dim * (dim - 1) // 2
dat = [np.ndarray.flatten(special_ortho_group.rvs(dim), order='C')
for _ in range(npts)]
dat = np.stack(dat)
else:
raise ValueError(f'Unrecognized manifold: {spec}')
data.append(dat)
data = np.concatenate(data, axis=1)
for i in range(spec_array.shape[1]):
if spec_array[0, i] != 0:
dim = spec_array[0, i]
tangents.append(np.zeros((npts, data.shape[1], dim * (dim - 1) // 2)))
elif spec_array[1, i] != 0:
dim = spec_array[1, i]
tangents.append(np.zeros((npts, data.shape[1], dim)))
for i in tqdm(range(npts)):
true_tangent = make_true_tangents(spec_array, data[i])
for j in range(len(specification)):
tangents[j][i] = true_tangent[j]
logging.info('Constructed data and true tangents for %s',
' x '.join(specification))
return data, latent_dim, tangents
def main(_):
# Generate data and run GEOMANCER
data, dim, tangents = make_product_manifold(SPECIFICATION.value, NPTS.value)
if ROTATE.value:
rot, _ = np.linalg.qr(np.random.randn(data.shape[1], data.shape[1]))
data_rot = data @ rot.T
components, spectrum = geomancer.fit(data_rot, dim)
errors = geomancer.eval_unaligned(data_rot, components, data, tangents)
else:
components, spectrum = geomancer.fit(data, dim)
errors = geomancer.eval_aligned(components, tangents)
logging.info('Error between subspaces: %.2f +/- %.2f radians',
np.mean(errors),
np.std(errors))
if PLOT.value:
# Plot spectrum
plt.figure(figsize=(8, 6))
plt.scatter(np.arange(len(spectrum)), spectrum, s=100)
largest_gap = np.argmax(spectrum[1:]-spectrum[:-1]) + 1
plt.axvline(largest_gap, linewidth=2, c='r')
plt.xticks([])
plt.yticks(fontsize=18)
plt.xlabel('Index', fontsize=24)
plt.ylabel('Eigenvalue', fontsize=24)
plt.title('GeoManCEr Eigenvalue Spectrum', fontsize=24)
# Plot subspace bases
fig = plt.figure(figsize=(8, 6))
bases = components[0]
gs = gridspec.GridSpec(1, len(bases),
width_ratios=[b.shape[1] for b in bases])
for i in range(len(bases)):
ax = plt.subplot(gs[i])
ax.imshow(bases[i])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}\mathcal{M}_%d$' % (i+1), fontsize=18)
fig.canvas.set_window_title('GeoManCEr Results')
# Plot ground truth
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(1, len(tangents),
width_ratios=[b.shape[2] for b in tangents])
for i, spec in enumerate(SPECIFICATION.value):
ax = plt.subplot(gs[i])
ax.imshow(tangents[i][0])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(r'$T_{\mathbf{x}_1}%s$' % spec, fontsize=18)
fig.canvas.set_window_title('Ground Truth')
plt.show()
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
nguyenti/213-twitter-trend-cloud | cloud_maker.py | 1 | 1480 | import matplotlib.pyplot as plt
from wordcloud import WordCloud
from time import sleep
fn = "output/clouds.txt"
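# Assumed input format (inferred from the parsing loop below): lines come in
# pairs, a trend name followed by a comma-separated list of word:count
# entries ending with a trailing comma.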
if __name__ == '__main__':
# map of the form trend : map of word:counts
trend_clouds = {}
with open(fn, "r") as fp:
trend = fp.readline()
while(trend):
            # read the correlated word:count pairs; drop the trailing empty element
word_counts = fp.readline()
word_counts = word_counts.split(',')[:-1]
tuples = [(x.split(':')[0],
int(x.split(':')[1])) for x in word_counts]
if trend in trend_clouds:
for k,v in tuples:
if k in trend_clouds[trend]:
trend_clouds[trend][k] = trend_clouds[trend][k] + v
else:
trend_clouds[trend][k] = v
else: # add new entry for this trend
trend_clouds[trend] = dict(tuples)
trend = fp.readline()
# display word clouds
plt.ion()
#plt.figure()
for trend in trend_clouds.iterkeys():
if len(trend_clouds[trend]) < 1:
continue
wc = WordCloud().generate_from_frequencies(trend_clouds[trend].items())
fig = plt.figure()
fig.canvas.set_window_title(trend)
plt.imshow(wc)
plt.axis("off")
plt.pause(2.0) # delay for 2 seconds
# infinite loop so images stay alive
while True:
plt.pause(.05)
| lgpl-3.0 |
musically-ut/statsmodels | statsmodels/examples/ex_generic_mle.py | 32 | 16462 |
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
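    # q maps endog in {0, 1} to {-1, +1}, so that norm.cdf(q * X.dot(params))
    # is the likelihood of each observation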
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# An instance of GenericLikelihoodModel doesn't work directly, because loglike
# cannot get access to the data in self.endog, self.exog
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1#False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print(res_norm.params)
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000,
fargs=(datal.endog, datal.exog))
print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
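# averaging the forward- and backward-difference Hessians reduces the
# truncation error of the finite-difference approximation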
print(np.linalg.eigh(hh))
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
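# jac stacks the per-observation scores; sqrt(diag(inv(J'J))) below is the OPG
# (outer product of gradients) estimate of the standard errors, comparable to
# res_norm3.bse and res2.bse printed afterwards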
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
| bsd-3-clause |
oew1v07/scikit-image | doc/examples/plot_canny.py | 11 | 1633 | """
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
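# The hysteresis thresholds can also be passed explicitly; the values below are
# only illustrative (not tuned for this image) and the result is not plotted.
edges3 = feature.canny(im, sigma=3, low_threshold=0.05, high_threshold=0.2)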
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
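    # frozen scipy distributions only accept a random_state from scipy 0.16 on,
    # so sampling from distributions is reproducible only for those versions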
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation_score is the
        # mean of the per-fold mean scores instead of the aggregate
        # sample-wise mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/backends/backend_qt4agg.py | 11 | 3003 | """
Render to Qt from the Agg renderer
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import NavigationToolbar2QTAgg
from .backend_qt5agg import FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
from matplotlib.cbook import mplDeprecation
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt4agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
# it has been reported that Qt is semi-broken in a windows
# environment. If `self.draw()` uses `update` to trigger a
# system-level window repaint (as is explicitly advised in the
# Qt documentation) the figure responds very slowly to mouse
# input. The work around is to directly use `repaint`
# (against the advice of the Qt documentation). The
# difference between `update` and repaint is that `update`
# schedules a `repaint` for the next time the system is idle,
# where as `repaint` repaints the window immediately. The
# risk is if `self.draw` gets called with in another `repaint`
# method there will be an infinite recursion. Thus, we only
# expose windows users to this risk.
if sys.platform.startswith('win'):
self._priv_update = self.repaint
else:
self._priv_update = self.update
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
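# Module-level aliases under the generic names shared by all backends, so that
# embedding code can simply import FigureCanvas / FigureManager from here.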
| lgpl-3.0 |
OpenDA-Association/OpenDA | course/exercise_double_pendulum_part2/all_runs.py | 1 | 3168 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Script that will produce the figures of exercise 2
@author: verlaanm
"""
#load numpy and matplotlib if needed
import numpy as np
import matplotlib.pyplot as plt
import simulation_truth_results as truth
import simulation_initial_results as initial
import simulation_enkf_results as enkf
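# The simulation_*_results modules are result files (presumably written by the
# OpenDA runs of this exercise); the attributes used below are model_time, x,
# analysis_time and x_f_central.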
plt.figure()
plt.clf()
plt.subplot(2,1,1)
plt.plot(initial.model_time,initial.x[:,0],'g')
plt.plot(truth.model_time,truth.x[:,0],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,0],'b');
plt.legend(('initial','truth','EnKF'))
plt.ylabel(r'$\theta_1$')
plt.subplot(2,1,2)
plt.plot(initial.model_time,initial.x[:,1],'g')
plt.plot(truth.model_time,truth.x[:,1],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,1],'b');
plt.ylabel(r'$\theta_2$')
plt.xlabel(r'$t$')
plt.savefig('fig_series_enkf.png')
plt.show()
import simulation_enkf_results_seed31415 as enkf2
plt.figure()
plt.clf()
plt.subplot(2,1,1)
plt.plot(initial.model_time,initial.x[:,0],'g')
plt.plot(truth.model_time,truth.x[:,0],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,0],'b');
plt.plot(enkf2.analysis_time,enkf2.x_f_central[:,0],'r');
plt.legend(('initial','truth','EnKF seed=21','EnKF seed=31415'))
plt.ylabel(r'$\theta_1$')
plt.subplot(2,1,2)
plt.plot(initial.model_time,initial.x[:,1],'g')
plt.plot(truth.model_time,truth.x[:,1],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,1],'b');
plt.plot(enkf2.analysis_time,enkf2.x_f_central[:,1],'r');
plt.ylabel(r'$\theta_2$')
plt.xlabel(r'$t$')
plt.savefig('fig_series_enkf_seed31415.png')
plt.show()
import simulation_enkf_results_stdobs2 as enkf2
plt.figure()
plt.clf()
plt.subplot(2,1,1)
plt.plot(initial.model_time,initial.x[:,0],'g')
plt.plot(truth.model_time,truth.x[:,0],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,0],'b');
plt.plot(enkf2.analysis_time,enkf2.x_f_central[:,0],'r');
plt.legend(('initial','truth','EnKF $\sigma_o=0.2$','EnKF $\sigma_o=2.0$'))
plt.ylabel(r'$\theta_1$')
plt.subplot(2,1,2)
plt.plot(initial.model_time,initial.x[:,1],'g')
plt.plot(truth.model_time,truth.x[:,1],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,1],'b');
plt.plot(enkf2.analysis_time,enkf2.x_f_central[:,1],'r');
plt.ylabel(r'$\theta_2$')
plt.xlabel(r'$t$')
plt.savefig('fig_series_enkf_std2.png')
plt.show()
import simulation_enkf_results_ens6 as enkf3
import simulation_enkf_results_ens10 as enkf4
plt.figure()
plt.clf()
plt.subplot(2,1,1)
plt.plot(initial.model_time,initial.x[:,0],'g')
plt.plot(truth.model_time,truth.x[:,0],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,0],'b');
plt.plot(enkf3.analysis_time,enkf3.x_f_central[:,0],'r');
plt.plot(enkf4.analysis_time,enkf4.x_f_central[:,0],'m');
plt.legend(('initial','truth','EnKF n=50','EnKF n=6','EnKF n=10'))
plt.ylabel(r'$\theta_1$')
plt.subplot(2,1,2)
plt.plot(initial.model_time,initial.x[:,1],'g')
plt.plot(truth.model_time,truth.x[:,1],'k')
plt.plot(enkf.analysis_time,enkf.x_f_central[:,1],'b');
plt.plot(enkf3.analysis_time,enkf3.x_f_central[:,1],'r');
plt.plot(enkf4.analysis_time,enkf4.x_f_central[:,1],'m');
plt.ylabel(r'$\theta_2$')
plt.xlabel(r'$t$')
plt.savefig('fig_series_enkf_ens_size.png')
plt.show()
| lgpl-3.0 |
NICTA/linearizedGP | experiments/uspsbclass.py | 1 | 4452 | #! /usr/bin/env python
# linearizedGP -- Implementation of extended and unscented Gaussian processes.
# Copyright (C) 2014 National ICT Australia (NICTA)
#
# This file is part of linearizedGP.
#
# linearizedGP is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# linearizedGP is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with linearizedGP. If not, see <http://www.gnu.org/licenses/>.
""" Run the USPS handwritten digits experiment from our NIPS 2014 paper. Also
    see uspsbclass.m for the octave/matlab algorithms. You'll need scikit-learn
    for this.
Author: Daniel Steinberg ([email protected])
Institute: NICTA
Date: 4 Sep 2014
"""
import numpy as np
from linearizedGP import unscentedGP
from linearizedGP import extendedGP
from linearizedGP import kernels
import scipy.io as sio
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
# Settings --------------------------------------------------------------------
# Data location
datapath = "data/USPS_3_5_data.mat"
# Classification properties
kbounds = (0.1, 0.1)
kinit = (1.0, 1.0)
nbound = 1e-7
ninit = 1e-7
# Sigmoid functions
lgsig = lambda f: 1.0 / (1 + np.exp(-f))
dlgsig = lambda f: lgsig(f) * lgsig(-f)
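# dlgsig is the derivative of the logistic sigmoid; the extended (Taylor) GP
# below needs it, while the unscented GP only uses lgsig itself.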
# kernel functions
kfunc = kernels.kern_selog
# Data ------------------------------------------------------------------------
USPSdata = sio.loadmat(datapath, squeeze_me=True)
x = USPSdata['x'].T
xs = USPSdata['xx'].T
y = USPSdata['y']
ys = USPSdata['yy']
y[y == -1] = 0
ys[ys == -1] = 0
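# relabel classes from {-1, +1} to {0, 1} to match the Bernoulli likelihood and
# the predicted probabilities used in the evaluation below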
# Train the non-GP classifiers ------------------------------------------------
print("\nLearning the support vector classifier")
C_range = 10.0 ** np.arange(-2, 9)
gamma_range = 10.0 ** np.arange(-5, 4)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedKFold(y=y, n_folds=3)
grid = GridSearchCV(SVC(kernel='rbf', probability=True), param_grid=param_grid,
cv=cv, verbose=1)
grid.fit(x.T, y)
svc = grid.best_estimator_
print("\nLearning the logistic regression classifier")
lreg = LogisticRegression(penalty='l2')
lreg.fit(x.T, y)
# Train the GPs ---------------------------------------------------------------
# Statistical linearisation
print("\nLearning statistically linearised classifier")
sgp = unscentedGP.unscentedGP(nlfunc=lgsig, kfunc=kfunc)
sgp.learnLB(np.log(kbounds), ynoise=nbound)
lml = sgp.learn(x, y, np.log(kinit), ynoise=ninit, verbose=True)
print("Log marginal likelihood = {0}".format(lml))
print("Hyper-parameters = {0}, noise = {1}".format(sgp.kparams, sgp.ynoise))
# Taylor linearisation
print("\nLearning Taylor series linearised classifier")
tgp = extendedGP.extendedGP(nlfunc=lgsig, dnlfunc=dlgsig, kfunc=kfunc)
tgp.learnLB(np.log(kbounds), ynoise=nbound)
lml = tgp.learn(x, y, np.log(kinit), ynoise=ninit, verbose=True)
print("Log marginal likelihood = {0}".format(lml))
print("Hyper-parameters = {0}, noise = {1}".format(tgp.kparams, tgp.ynoise))
# Prediction ------------------------------------------------------------------
def bernloglike(pys):
return -(ys * np.log(pys) + (1 - ys) * np.log(1 - pys)).mean()
def errrate(pys):
return float((ys != (pys >= 0.5)).sum()) / ys.shape[0]
print("\n\nResults: \n----------------")
# Statlin
pys_s, epys_s, Ems_s, Vms_s = sgp.quadpredict(xs)
print("Stat lin: av nll = {:.6f}, Error rate = {:.6f}"
.format(bernloglike(pys_s), errrate(pys_s)))
# Taylorlin
pys_t, epys_t, Ems_t, Vms_t = tgp.predict(xs)
print("Tayl lin: av nll = {:.6f}, Error rate = {:.6f}"
.format(bernloglike(pys_t), errrate(pys_t)))
# SVM
pys_v = svc.predict_proba(xs.T)[:, 1]
print("SVM: av nll = {:.6f}, Error rate = {:.6f}"
.format(bernloglike(pys_v), errrate(pys_v)))
# Logistic Regression
pys_r = lreg.predict_proba(xs.T)[:, 1]
print("Logistic: av nll = {:.6f}, error rate = {:.6f}"
.format(bernloglike(pys_r), errrate(pys_r)))
| gpl-3.0 |
macks22/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
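# Note: randomized_svd's n_iter sets the number of power iterations; more
# iterations improve accuracy when the singular values decay slowly (as with
# the fat-tailed spectra generated below), at additional cost.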
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
exeex/midi-visualization | roll.py | 1 | 9636 | import mido
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import colorConverter
# inherit the origin mido class
class MidiFile(mido.MidiFile):
def __init__(self, filename):
mido.MidiFile.__init__(self, filename)
self.sr = 10
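        # sr: down-sampling factor applied to the tick axis when the piano
        # roll is built in get_roll()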
self.meta = {}
self.events = self.get_events()
def get_events(self):
mid = self
print(mid)
        # There can be more than 16 tracks in mid.tracks, but only 16 MIDI channels carry "music" events.
        # We store the music events of the 16 channels in the list "events" with the form [[ch1], [ch2], ..., [ch16]].
        # Lyrics and meta data use an extra channel, which is not included in "events".
events = [[] for x in range(16)]
# Iterate all event in the midi and extract to 16 channel form
for track in mid.tracks:
for msg in track:
try:
channel = msg.channel
events[channel].append(msg)
except AttributeError:
try:
if type(msg) != type(mido.UnknownMetaMessage):
self.meta[msg.type] = msg.dict()
else:
pass
except:
print("error",type(msg))
return events
def get_roll(self):
events = self.get_events()
# Identify events, then translate to piano roll
# choose a sample ratio(sr) to down-sample through time axis
sr = self.sr
# compute total length in tick unit
length = self.get_total_ticks()
# allocate memory to numpy array
roll = np.zeros((16, 128, length // sr), dtype="int8")
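        # roll axes are (MIDI channel, note number, time in ticks // sr)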
        # use a register array to save the state (on/off) for each key
note_register = [int(-1) for x in range(128)]
        # use a register array to save the state (program_change) for each channel
timbre_register = [1 for x in range(16)]
for idx, channel in enumerate(events):
time_counter = 0
volume = 100
            # Volume is changed by control change (cc) events: cc7 sets the
            # channel volume directly (0-127) and cc11 (expression) scales it
            # proportionally; the working value starts at 100
print("channel", idx, "start")
for msg in channel:
if msg.type == "control_change":
if msg.control == 7:
volume = msg.value
# directly assign volume
if msg.control == 11:
volume = volume * msg.value // 127
# change volume by percentage
# print("cc", msg.control, msg.value, "duration", msg.time)
if msg.type == "program_change":
timbre_register[idx] = msg.program
print("channel", idx, "pc", msg.program, "time", time_counter, "duration", msg.time)
if msg.type == "note_on":
print("on ", msg.note, "time", time_counter, "duration", msg.time, "velocity", msg.velocity)
note_on_start_time = time_counter // sr
note_on_end_time = (time_counter + msg.time) // sr
intensity = volume * msg.velocity // 127
                    # When a note_on event *ends*, the note starts to play
                    # Record the end time of the note_on event if there is no value in the register
                    # When the note_off event happens, we fill in the color
if note_register[msg.note] == -1:
note_register[msg.note] = (note_on_end_time,intensity)
else:
# When note_on event happens again, we also fill in the color
old_end_time = note_register[msg.note][0]
old_intensity = note_register[msg.note][1]
roll[idx, msg.note, old_end_time: note_on_end_time] = old_intensity
note_register[msg.note] = (note_on_end_time,intensity)
if msg.type == "note_off":
print("off", msg.note, "time", time_counter, "duration", msg.time, "velocity", msg.velocity)
note_off_start_time = time_counter // sr
note_off_end_time = (time_counter + msg.time) // sr
note_on_end_time = note_register[msg.note][0]
intensity = note_register[msg.note][1]
# fill in color
roll[idx, msg.note, note_on_end_time:note_off_end_time] = intensity
note_register[msg.note] = -1 # reinitialize register
time_counter += msg.time
                # TODO: velocity -> done, but not verified
                # TODO: Pitch wheel
                # TODO: Channel -> Program Change / Timbre category
                # TODO: real time scale of roll
# if there is a note not closed at the end of a channel, close it
for key, data in enumerate(note_register):
if data != -1:
note_on_end_time = data[0]
intensity = data[1]
# print(key, note_on_end_time)
note_off_start_time = time_counter // sr
roll[idx, key, note_on_end_time:] = intensity
note_register[idx] = -1
return roll
def get_roll_image(self):
roll = self.get_roll()
plt.ioff()
K = 16
transparent = colorConverter.to_rgba('black')
colors = [mpl.colors.to_rgba(mpl.colors.hsv_to_rgb((i / K, 1, 1)), alpha=1) for i in range(K)]
cmaps = [mpl.colors.LinearSegmentedColormap.from_list('my_cmap', [transparent, colors[i]], 128) for i in
range(K)]
for i in range(K):
cmaps[i]._init() # create the _lut array, with rgba values
# create your alpha array and fill the colormap with them.
# here it is progressive, but you can create whathever you want
alphas = np.linspace(0, 1, cmaps[i].N + 3)
cmaps[i]._lut[:, -1] = alphas
fig = plt.figure(figsize=(4, 3))
a1 = fig.add_subplot(111)
a1.axis("equal")
a1.set_facecolor("black")
array = []
for i in range(K):
try:
img = a1.imshow(roll[i], interpolation='nearest', cmap=cmaps[i], aspect='auto')
array.append(img.get_array())
except IndexError:
pass
return array
def draw_roll(self):
roll = self.get_roll()
# build and set fig obj
plt.ioff()
fig = plt.figure(figsize=(4, 3))
a1 = fig.add_subplot(111)
a1.axis("equal")
a1.set_facecolor("black")
# change unit of time axis from tick to second
tick = self.get_total_ticks()
second = mido.tick2second(tick, self.ticks_per_beat, self.get_tempo())
print(second)
if second > 10:
x_label_period_sec = second // 10
else:
x_label_period_sec = second / 10 # ms
print(x_label_period_sec)
x_label_interval = mido.second2tick(x_label_period_sec, self.ticks_per_beat, self.get_tempo()) / self.sr
print(x_label_interval)
plt.xticks([int(x * x_label_interval) for x in range(20)], [round(x * x_label_period_sec, 2) for x in range(20)])
# change scale and label of y axis
plt.yticks([y*16 for y in range(8)], [y*16 for y in range(8)])
# build colors
channel_nb = 16
transparent = colorConverter.to_rgba('black')
colors = [mpl.colors.to_rgba(mpl.colors.hsv_to_rgb((i / channel_nb, 1, 1)), alpha=1) for i in range(channel_nb)]
cmaps = [mpl.colors.LinearSegmentedColormap.from_list('my_cmap', [transparent, colors[i]], 128) for i in
range(channel_nb)]
# build color maps
for i in range(channel_nb):
cmaps[i]._init()
# create your alpha array and fill the colormap with them.
alphas = np.linspace(0, 1, cmaps[i].N + 3)
# create the _lut array, with rgba values
cmaps[i]._lut[:, -1] = alphas
# draw piano roll and stack image on a1
for i in range(channel_nb):
try:
a1.imshow(roll[i], origin="lower", interpolation='nearest', cmap=cmaps[i], aspect='auto')
except IndexError:
pass
# draw color bar
colors = [mpl.colors.hsv_to_rgb((i / channel_nb, 1, 1)) for i in range(channel_nb)]
cmap = mpl.colors.LinearSegmentedColormap.from_list('my_cmap', colors, 16)
a2 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
cbar = mpl.colorbar.ColorbarBase(a2, cmap=cmap,
orientation='horizontal',
ticks=list(range(16)))
# show piano roll
plt.draw()
plt.ion()
plt.show(block=True)
def get_tempo(self):
try:
return self.meta["set_tempo"]["tempo"]
        except KeyError:
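            # MIDI default tempo: 500000 microseconds per beat (i.e. 120 BPM)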
return 500000
def get_total_ticks(self):
max_ticks = 0
for channel in range(16):
ticks = sum(msg.time for msg in self.events[channel])
if ticks > max_ticks:
max_ticks = ticks
return max_ticks
if __name__ == "__main__":
mid = MidiFile("test_file/1.mid")
# get the list of all events
# events = mid.get_events()
# get the np array of piano roll image
roll = mid.get_roll()
# draw piano roll by pyplot
mid.draw_roll()
| mit |
lunyang/Data-Science-45min-Intros | choosing-k-in-kmeans/3d-example.py | 25 | 2925 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
"""
This script is designed to run inline (%run 3d-example.py) in
the corresponding IPython notebook. It generates a 3d scatter
plot using scikit-learn data generation, with the number of
samples and clusters determined by the command-line arguments.
"""
import argparse
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.datasets import make_blobs
import seaborn as sns
from gap_stats import gap_statistics
from gap_stats import plot_gap_statistics
def make_example_plot(args):
"""
Create artificial data (blobs) and color them according to the
appropriate blob center.
"""
# read args
samples = args.samples
clusters = args.clusters
# create some data
X, y = make_blobs(n_samples=samples,
centers=clusters,
n_features=3,
# increase variance for illustration
cluster_std=1.5,
# fix random_state if you believe in determinism
#random_state=42
)
# seaborn display settings
sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(clusters):
# for each center, add data to the figure w/ appropriate label
ax.plot(X[y==i,0],
X[y==i,1],
X[y==i,2],
'o',
alpha=0.6,
label='cluster {}'.format(i)
)
ax.set_title('{} labeled clusters (ground truth)'.format(clusters))
ax.legend(loc='upper left')
# seaborn settings - no, really set these things this time, please
sns.set(style='whitegrid', palette=sns.color_palette("Set2", clusters))
#plt.show()
# potentially return the data for later use
data = None
if args.gap:
data = (X, y)
return data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s","--samples"
, dest="samples"
, type=int
, default=100
)
parser.add_argument("-c","--clusters"
, dest="clusters"
, type=int
, default=5
)
parser.add_argument("-g","--gap"
, dest="gap"
, type=bool
, default=False
)
args = parser.parse_args()
data = make_example_plot(args)
if args.gap:
# i just really prefer the dark theme
sns.set(style='darkgrid', palette='deep')
# unpack
X, y = data
# run the gap statistic algorithm
gaps, errs, difs = gap_statistics(X, ks=range(1, args.clusters+5))
# plot (intended for %matplotlib inline)
plot_gap_statistics(gaps, errs, difs)
| unlicense |
fzalkow/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
almarklein/bokeh | bokeh/tests/test_sources.py | 1 | 3183 | import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSources(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
ds.remove("foo")
self.assertEquals(ds.column_names, [])
    def test_remove_does_not_exist(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
def test_basic(self):
ds = ServerDataSource()
self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
sandeepgupta2k4/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 9485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
| apache-2.0 |
jmmease/pandas | asv_bench/benchmarks/io_sql.py | 7 | 4120 | import sqlalchemy
from .pandas_vb_common import *
import sqlite3
from sqlalchemy import create_engine
#-------------------------------------------------------------------------------
# to_sql
class WriteSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_fallback(self):
self.df.to_sql('test1', self.con, if_exists='replace')
def time_sqlalchemy(self):
self.df.to_sql('test1', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# read_sql
class ReadSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_sql('test2', self.engine, if_exists='replace')
self.df.to_sql('test2', self.con, if_exists='replace')
def time_read_query_fallback(self):
read_sql_query('SELECT * FROM test2', self.con)
def time_read_query_sqlalchemy(self):
read_sql_query('SELECT * FROM test2', self.engine)
def time_read_table_sqlalchemy(self):
read_sql_table('test2', self.engine)
#-------------------------------------------------------------------------------
# type specific write
class WriteSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'string': (['foo'] * 10000), 'bool': ([True] * 10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df.loc[1000:3000, 'float'] = np.nan
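        # rows 1000-3000 of the float column are set to NaN so the write benchmarks also cover missing values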
def time_string_fallback(self):
self.df[['string']].to_sql('test_string', self.con, if_exists='replace')
def time_string_sqlalchemy(self):
self.df[['string']].to_sql('test_string', self.engine, if_exists='replace')
def time_float_fallback(self):
self.df[['float']].to_sql('test_float', self.con, if_exists='replace')
def time_float_sqlalchemy(self):
self.df[['float']].to_sql('test_float', self.engine, if_exists='replace')
def time_datetime_sqlalchemy(self):
self.df[['datetime']].to_sql('test_datetime', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# type specific read
class ReadSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df['datetime_string'] = self.df['datetime'].map(str)
self.df.to_sql('test_type', self.engine, if_exists='replace')
self.df[['float', 'datetime_string']].to_sql('test_type', self.con, if_exists='replace')
def time_datetime_read_and_parse_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime_string'], parse_dates=['datetime_string'])
def time_datetime_read_as_native_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime'])
def time_float_read_query_fallback(self):
read_sql_query('SELECT float FROM test_type', self.con)
def time_float_read_query_sqlalchemy(self):
read_sql_query('SELECT float FROM test_type', self.engine)
def time_float_read_table_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['float'])
| bsd-3-clause |
aminert/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
wanggang3333/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (values need to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
lyoshiwo/resume_job_matching | Step7_basal_classifier_with_parameter_search.py | 1 | 13815 | # encoding=utf8
from sklearn import cross_validation, grid_search
import pandas as pd
import os
import time
import util
from matplotlib import pyplot as plt
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
tree_test = False
xgb_test = False
cnn_test = False
rf_test = False
lstm_test = False
# max_depth=12 0.433092948718
# rn>200,rh=21 0.537 to 0.541; rh=21,rn=400; 0.542
CV_FLAG = 1
param = {}
param['objective'] = 'multi:softmax'
param['eta'] = 0.03
param['max_depth'] = 6
param['eval_metric'] = 'merror'
param['silent'] = 1
param['min_child_weight'] = 10
param['subsample'] = 0.7
param['colsample_bytree'] = 0.2
param['nthread'] = 4
param['num_class'] = -1
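# num_class is only a placeholder here; it is overwritten with the real number of labels before xgb.cv is called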
def get_all_by_name(name):
import numpy as np
if os.path.exists(util.features_prefix + name + "_XY.pkl") is False:
        print name + ' file does not exist'
exit()
if os.path.exists(util.features_prefix + name + '_XXXYYY.pkl') is False:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_X, train_Y, test_size=0.33,
random_state=0)
X_train, X_validate, y_train, y_validate = cross_validation.train_test_split(X_train, y_train, test_size=0.33,
random_state=0)
X_train = np.array(X_train)
y_train = np.array(y_train)
y_test = np.array(y_test)
X_test = np.array(X_test)
X_validate = np.array(X_validate)
y_validate = np.array(y_validate)
pd.to_pickle([X_train, X_validate, X_test, y_train, y_validate, y_test],
util.features_prefix + name + '_XXXYYY.pkl')
if os.path.exists(util.features_prefix + name + '_XXXYYY.pkl'):
print name
from sklearn.ensemble import RandomForestClassifier
if rf_test is False:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
[X_train, X_validate, X_test, y_train, y_validate, y_test] = pd.read_pickle(
util.features_prefix + name + '_XXXYYY.pkl')
x = np.concatenate([X_train, X_validate], axis=0)
y = np.concatenate([y_train, y_validate], axis=0)
print 'rf'
n_estimator = range(100, 301, 100)
max_depth = range(5, 26, 1)
clf = RandomForestClassifier(n_jobs=4)
parameters = {'n_estimators': n_estimator, 'max_depth': max_depth}
grid_clf = grid_search.GridSearchCV(clf, parameters)
grid_clf.fit(np.array(train_X), np.array(train_Y))
score = grid_clf.grid_scores_
l1 = [1 - x[1] for x in score if x[0]['n_estimators'] == n_estimator[0]]
l2 = [1 - x[1] for x in score if x[0]['n_estimators'] == n_estimator[1]]
l3 = [1 - x[1] for x in score if x[0]['n_estimators'] == n_estimator[2]]
plt.plot(range(5, 26, 1), l1,
'b--')
plt.plot(range(5, 26, 1), l2,
'r.--')
plt.plot(range(5, 26, 1), l3,
'g')
plt.legend((str(n_estimator[0]) + ' estimators', str(n_estimator[1]) + ' estimators',
str(n_estimator[2]) + ' estimators'),
loc=0, shadow=True)
plt.xlabel('max depth of RandomForest')
plt.ylabel('average error rate of 3-fold cross-validation')
plt.grid(True)
plt.show()
exit()
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if xgb_test is False:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
print 'xg'
import xgboost as xgb
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
set_y = set(train_Y)
param["num_class"] = len(set_y)
dtrain = xgb.DMatrix(train_X, label=train_Y)
xgb.cv(param, dtrain, 4, nfold=3, show_progress=True)
if cnn_test is False:
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
print 'cnn'
import copy
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from keras.layers.convolutional import Convolution1D
label_dict = LabelEncoder().fit(train_Y)
label_num = len(label_dict.classes_)
x = train_X
y = train_Y
train_Y = np_utils.to_categorical(y, label_num)
# x = np.concatenate([X_train, X_validate], axis=0)
X_train = x
X_semantic = np.array(copy.deepcopy(X_train[:, range(95, 475)]))
X_manual = np.array(copy.deepcopy(X_train[:, range(0, 95)]))
X_cluster = np.array(copy.deepcopy(X_train[:, range(475, 545)]))
X_document = np.array(copy.deepcopy(X_train[:, range(545, 547)]))
X_document[:, [0]] = X_document[:, [0]] + train_X[:, [-1]].max()
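            # feature layout of train_X: columns 0-94 manual features, 95-474 semantic vectors (reshaped to 10 x 38 below), 475-544 cluster ids, 545-546 document ids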
dic_num_cluster = X_cluster.max()
dic_num_manual = train_X.max()
dic_num_document = X_document[:, [0]].max()
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.core import Merge
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM
X_semantic = X_semantic.reshape(X_semantic.shape[0], 10, -1)
X_semantic_1 = np.zeros((X_semantic.shape[0], X_semantic.shape[2], X_semantic.shape[1]))
for i in range(int(X_semantic.shape[0])):
X_semantic_1[i] = np.transpose(X_semantic[i])
model_semantic = Sequential()
model_lstm = Sequential()
model_lstm.add(LSTM(output_dim=30, input_shape=X_semantic_1.shape[1:], go_backwards=True))
model_semantic.add(Convolution1D(nb_filter=32,
filter_length=2,
border_mode='valid',
activation='relu', input_shape=X_semantic_1.shape[1:]))
# model_semantic.add(MaxPooling1D(pool_length=2))
model_semantic.add(Convolution1D(nb_filter=8,
filter_length=2,
border_mode='valid',
activation='relu'))
# model_semantic.add(MaxPooling1D(pool_length=2))
model_semantic.add(Flatten())
# we use standard max pooling (halving the output of the previous layer):
model_manual = Sequential()
model_manual.add(Embedding(input_dim=dic_num_manual + 1, output_dim=20, input_length=X_manual.shape[1]))
# model_manual.add(Convolution1D(nb_filter=2,
# filter_length=2,
# border_mode='valid',
# activation='relu'))
# model_manual.add(MaxPooling1D(pool_length=2))
# model_manual.add(Convolution1D(nb_filter=8,
# filter_length=2,
# border_mode='valid',
# activation='relu'))
# model_manual.add(MaxPooling1D(pool_length=2))
model_manual.add(Flatten())
model_document = Sequential()
model_document.add(
Embedding(input_dim=dic_num_document + 1, output_dim=2, input_length=X_document.shape[1]))
model_document.add(Flatten())
model_cluster = Sequential()
model_cluster.add(Embedding(input_dim=dic_num_cluster + 1, output_dim=5, input_length=X_cluster.shape[1]))
model_cluster.add(Flatten())
model = Sequential()
# model = model_cluster
model.add(Merge([model_document, model_cluster, model_manual, model_semantic], mode='concat',
concat_axis=1))
model.add(Dense(512))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(label_num))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
            # model.fit(X_cluster_1, train_Y, batch_size=100,
# nb_epoch=100, validation_split=0.33, verbose=1)
model.fit([X_document, X_cluster, X_manual, X_semantic_1], train_Y,
batch_size=100, nb_epoch=15, validation_split=0.33)
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if lstm_test is False:
import numpy as np
[train_X, train_Y] = pd.read_pickle(util.features_prefix + name + '_XY.pkl')
print 'lstm'
import copy
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from keras.layers.convolutional import Convolution1D
label_dict = LabelEncoder().fit(train_Y)
label_num = len(label_dict.classes_)
train_Y = np_utils.to_categorical(train_Y, label_num)
# x = np.concatenate([X_train, X_validate], axis=0)
X_train = train_X
X_semantic = np.array(copy.deepcopy(X_train[:, range(95, 475)]))
X_manual = np.array(copy.deepcopy(X_train[:, range(0, 95)]))
X_cluster = np.array(copy.deepcopy(X_train[:, range(475, 545)]))
X_document = np.array(copy.deepcopy(X_train[:, range(545, 547)]))
X_document[:, [0]] = X_document[:, [0]] + train_X[:, [-1]].max()
dic_num_cluster = X_cluster.max()
dic_num_manual = train_X.max()
dic_num_document = X_document[:, [0]].max()
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.core import Merge
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM
X_semantic = X_semantic.reshape(X_semantic.shape[0], 10, -1)
X_semantic_1 = np.zeros((X_semantic.shape[0], X_semantic.shape[2], X_semantic.shape[1]))
for i in range(int(X_semantic.shape[0])):
X_semantic_1[i] = np.transpose(X_semantic[i])
model_semantic = Sequential()
model_lstm = Sequential()
model_lstm.add(LSTM(output_dim=30, input_shape=X_semantic_1.shape[1:], go_backwards=True))
model_semantic.add(Convolution1D(nb_filter=32,
filter_length=2,
border_mode='valid',
activation='relu', input_shape=X_semantic_1.shape[1:]))
# model_semantic.add(MaxPooling1D(pool_length=2))
model_semantic.add(Convolution1D(nb_filter=8,
filter_length=2,
border_mode='valid',
activation='relu'))
# model_semantic.add(MaxPooling1D(pool_length=2))
model_semantic.add(Flatten())
# we use standard max pooling (halving the output of the previous layer):
model_manual = Sequential()
model_manual.add(Embedding(input_dim=dic_num_manual + 1, output_dim=20, input_length=X_manual.shape[1]))
model_manual.add(Flatten())
model_document = Sequential()
model_document.add(
Embedding(input_dim=dic_num_document + 1, output_dim=2, input_length=X_document.shape[1]))
model_document.add(Flatten())
model_cluster = Sequential()
model_cluster.add(Embedding(input_dim=dic_num_cluster + 1, output_dim=5, input_length=X_cluster.shape[1]))
model_cluster.add(Flatten())
model = Sequential()
# model = model_cluster
model.add(Merge([model_document, model_cluster, model_manual, model_lstm], mode='concat',
concat_axis=1))
model.add(Dense(512))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(label_num))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# model.fit(X_cluster_1, train_Y, batch_size=100,
# nb_epoch=100, validation_split=0.33, verbose=1)
model.fit([X_document, X_cluster, X_manual, X_semantic_1], train_Y,
batch_size=100, nb_epoch=15, validation_split=0.33)
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if __name__ == "__main__":
for name in ['degree', 'salary', 'size', 'position']:
get_all_by_name(name)
| apache-2.0 |
alexsavio/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/io/test_orc.py | 6 | 6482 | """ test orc compat """
import datetime
import os
import numpy as np
import pytest
import pandas as pd
from pandas import read_orc
import pandas._testing as tm
pytest.importorskip("pyarrow", minversion="0.13.0")
pytest.importorskip("pyarrow.orc")
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data", "orc")
def test_orc_reader_empty(dirpath):
columns = [
"boolean1",
"byte1",
"short1",
"int1",
"long1",
"float1",
"double1",
"bytes1",
"string1",
]
dtypes = [
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"object",
"object",
]
expected = pd.DataFrame(index=pd.RangeIndex(0))
for colname, dtype in zip(columns, dtypes):
expected[colname] = pd.Series(dtype=dtype)
inputfile = os.path.join(dirpath, "TestOrcFile.emptyFile.orc")
got = read_orc(inputfile, columns=columns)
tm.assert_equal(expected, got)
def test_orc_reader_basic(dirpath):
data = {
"boolean1": np.array([False, True], dtype="bool"),
"byte1": np.array([1, 100], dtype="int8"),
"short1": np.array([1024, 2048], dtype="int16"),
"int1": np.array([65536, 65536], dtype="int32"),
"long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"),
"float1": np.array([1.0, 2.0], dtype="float32"),
"double1": np.array([-15.0, -5.0], dtype="float64"),
"bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"),
"string1": np.array(["hi", "bye"], dtype="object"),
}
expected = pd.DataFrame.from_dict(data)
inputfile = os.path.join(dirpath, "TestOrcFile.test1.orc")
got = read_orc(inputfile, columns=data.keys())
tm.assert_equal(expected, got)
def test_orc_reader_decimal(dirpath):
from decimal import Decimal
# Only testing the first 10 rows of data
data = {
"_col0": np.array(
[
Decimal("-1000.50000"),
Decimal("-999.60000"),
Decimal("-998.70000"),
Decimal("-997.80000"),
Decimal("-996.90000"),
Decimal("-995.10000"),
Decimal("-994.11000"),
Decimal("-993.12000"),
Decimal("-992.13000"),
Decimal("-991.14000"),
],
dtype="object",
)
}
expected = pd.DataFrame.from_dict(data)
inputfile = os.path.join(dirpath, "TestOrcFile.decimal.orc")
got = read_orc(inputfile).iloc[:10]
tm.assert_equal(expected, got)
def test_orc_reader_date_low(dirpath):
data = {
"time": np.array(
[
"1900-05-05 12:34:56.100000",
"1900-05-05 12:34:56.100100",
"1900-05-05 12:34:56.100200",
"1900-05-05 12:34:56.100300",
"1900-05-05 12:34:56.100400",
"1900-05-05 12:34:56.100500",
"1900-05-05 12:34:56.100600",
"1900-05-05 12:34:56.100700",
"1900-05-05 12:34:56.100800",
"1900-05-05 12:34:56.100900",
],
dtype="datetime64[ns]",
),
"date": np.array(
[
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
datetime.date(1900, 12, 25),
],
dtype="object",
),
}
expected = pd.DataFrame.from_dict(data)
inputfile = os.path.join(dirpath, "TestOrcFile.testDate1900.orc")
got = read_orc(inputfile).iloc[:10]
tm.assert_equal(expected, got)
def test_orc_reader_date_high(dirpath):
data = {
"time": np.array(
[
"2038-05-05 12:34:56.100000",
"2038-05-05 12:34:56.100100",
"2038-05-05 12:34:56.100200",
"2038-05-05 12:34:56.100300",
"2038-05-05 12:34:56.100400",
"2038-05-05 12:34:56.100500",
"2038-05-05 12:34:56.100600",
"2038-05-05 12:34:56.100700",
"2038-05-05 12:34:56.100800",
"2038-05-05 12:34:56.100900",
],
dtype="datetime64[ns]",
),
"date": np.array(
[
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
datetime.date(2038, 12, 25),
],
dtype="object",
),
}
expected = pd.DataFrame.from_dict(data)
inputfile = os.path.join(dirpath, "TestOrcFile.testDate2038.orc")
got = read_orc(inputfile).iloc[:10]
tm.assert_equal(expected, got)
def test_orc_reader_snappy_compressed(dirpath):
data = {
"int1": np.array(
[
-1160101563,
1181413113,
2065821249,
-267157795,
172111193,
1752363137,
1406072123,
1911809390,
-1308542224,
-467100286,
],
dtype="int32",
),
"string1": np.array(
[
"f50dcb8",
"382fdaaa",
"90758c6",
"9e8caf3f",
"ee97332b",
"d634da1",
"2bea4396",
"d67d89e8",
"ad71007e",
"e8c82066",
],
dtype="object",
),
}
expected = pd.DataFrame.from_dict(data)
inputfile = os.path.join(dirpath, "TestOrcFile.testSnappy.orc")
got = read_orc(inputfile).iloc[:10]
tm.assert_equal(expected, got)
| bsd-3-clause |
Transkribus/TranskribusDU | TranskribusDU/graph/Transformer_Logit.py | 1 | 8399 | # -*- coding: utf-8 -*-
"""
Node and edge feature transformers to extract features for PageXml based on Logit classifiers
Copyright Xerox(C) 2016 JL. Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union�s Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier #for multilabel classif
from sklearn.model_selection import GridSearchCV #0.18.1 REQUIRES NUMPY 1.12.1 or more recent
from common.trace import traceln
from .Transformer import Transformer
from .Transformer_PageXml import NodeTransformerTextEnclosed
dGridSearch_CONF = {'C':[0.1, 0.5, 1.0, 2.0] } #Grid search parameters for Logit training
dGridSearch_CONF = {'C':[0.01, 0.1, 1.0, 2.0] } #Grid search parameters for Logit training
DEBUG=0
#------------------------------------------------------------------------------------------------------
class NodeTransformerLogit(Transformer):
"""
we will get a list of blocks belonging to N classes.
    we train a logit classifier for those classes, as well as a multilabel classifier for the neighbors of those classes
    the built feature vector is 3*N long (class probabilities plus same-page and cross-page neighbor probabilities)
"""
dGridSearch_LR_conf = dGridSearch_CONF
def __init__(self, nbClass=None, n_feat_node=1000, t_ngrams_node=(2,4), b_node_lc=False, n_jobs=1):
"""
input:
- number of classes
- number of ngram
- ngram min/max size
- lowercase or not
- njobs when fitting the logit using grid search
if n_feat_node is negative, or 0, or None, we use all possible ngrams
"""
Transformer.__init__(self)
self.nbClass = nbClass
self.n_feat_node, self.t_ngrams_node, self.b_node_lc = n_feat_node, t_ngrams_node, b_node_lc
self.n_jobs = n_jobs
self.text_pipeline = None # feature extractor
self.mdl_main = None # the main model predicting among the nbClass classes
self.mdl_neighbor = None # the neighborhood model predicting zero to many of the classes
def fit(self, X, y=None):
"""
        This transformer needs the graphs to be fitted properly - see fitByGraph
"""
return self
def fitByGraph(self, lGraph, lAllNode=None):
"""
        we need to train 2 Logit: one to predict the node class, another to predict the class of the neighborhood
"""
self.text_pipeline = Pipeline([ ('selector' , NodeTransformerTextEnclosed())
, ('tf' , TfidfVectorizer(lowercase=self.b_node_lc
#, max_features=10000
, analyzer = 'char'
, ngram_range=self.t_ngrams_node)) #(2,6)), #we can use it separately from the pipleline once fitted
# , ('word_selector' , SelectKBest(chi2, k=self.n_feat_node))
])
# the y
        if lAllNode is None: lAllNode = [nd for g in lGraph for nd in g.lNode]
y = np.array([nd.cls for nd in lAllNode], dtype=np.int)
if self.nbClass != len(np.unique(y)):
traceln("Classes seen are: %s"%np.unique(y).tolist())
traceln(self.nbClass)
raise ValueError("ERROR: some class is not represented in the training set")
#fitting the textual feature extractor
self.text_pipeline.fit(lAllNode, y)
#extracting textual features
x = self.text_pipeline.transform(lAllNode)
#creating and training the main logit model
lr = LogisticRegression(class_weight='balanced')
self.mdl_main = GridSearchCV(lr , self.dGridSearch_LR_conf, refit=True, n_jobs=self.n_jobs)
self.mdl_main.fit(x, y)
del y
if DEBUG: print(self.mdl_main)
#now fit a multiclass multilabel logit to predict if a node is neighbor with at least one node of a certain class, for each class
#Shape = (nb_tot_nodes x nb_tot_labels)
y = np.vstack([g.getNeighborClassMask() for g in lGraph]) #we get this from the graph object.
assert y.shape[0] == len(lAllNode)
lr = LogisticRegression(class_weight='balanced')
gslr = GridSearchCV(lr , self.dGridSearch_LR_conf, refit=True, n_jobs=self.n_jobs)
self.mdl_neighbor = OneVsRestClassifier(gslr, n_jobs=self.n_jobs)
self.mdl_neighbor.fit(x, y)
del x, y
if DEBUG: print(self.mdl_neighbor)
return self
def transform(self, lNode):
"""
return the 2 logit scores
"""
        a = np.zeros( ( len(lNode), 3*self.nbClass ), dtype=np.float64) #for each class: is_of_class? is_neighbor_of_class on same page or across page?
x = self.text_pipeline.transform(lNode)
a[...,0:self.nbClass] = self.mdl_main .predict_proba(x)
a[..., self.nbClass:3*self.nbClass] = self.mdl_neighbor .predict_proba(x)
# for i, nd in enumerate(lNode):
# print i, nd, a[i]
if DEBUG: print(a)
return a
# def testEco(self,lX, lY):
# """
# we test 2 Logit: one to predict the node class, another to predict the class of the neighborhood
# and return a list of TestReport objects
# [ ClassPredictor_Test_Report
# , SamePageNeighborClassPredictor_Test_Report for each class
# , CrossPageNeighborClassPredictor_Test_Report for each class
# ]
# """
# loTstRpt = []
# ZZZZZ
# #extracting textual features
# X = self.text_pipeline.transform(lAllNode)
#
# # the Y
# Y = np.array([nd.cls for nd in lAllNode], dtype=np.int)
# Y_pred = self.mdl_main.predict(X)
# oTstRptMain = TestReportConfusion.newFromYYpred("TransformerLogit_main", Y_pred, Y, map(str, range(self.nbClass)))
# loTstRpt.append(oTstRptMain)
#
# #the Y for neighboring
# Y = np.vstack([g.getNeighborClassMask() for g in lGraph]) #we get this from the graph object.
# Y_pred = self.mdl_neighbor.predict(X)
# nbCol = Y_pred.shape[1]
# lsClassName = lGraph[0].getNeighborClassNameList()
# assert nbCol == len(lsClassName)
# for i, sClassName in enumerate(lsClassName):
# oTstRpt = TestReportConfusion.newFromYYpred(sClassName, Y_pred[:,i], Y[:,i], ["no", "yes"])
# loTstRpt.append(oTstRpt)
#
# return loTstRpt
#------------------------------------------------------------------------------------------------------
class EdgeTransformerLogit(Transformer):
"""
we will get a list of edges belonging to N classes.
    we train a logit classifier for those classes, as well as a multilabel classifier for the neighbors of those classes
    the built feature vector is 2 * 3*N long (source-node and target-node features concatenated)
"""
dGridSearch_LR_conf = dGridSearch_CONF
def __init__(self, nbClass, ndTrnsfLogit):
"""
input:
- number of classes
- the fitted node transformer (TransformerLogit), whose per-node logit scores are reused for the two nodes of each edge
"""
Transformer.__init__(self)
self.nbClass = nbClass
self.transfNodeLogit = ndTrnsfLogit #fitted node transformer
def transform(self, lEdge, bMirrorPage=True):
"""
return the 2 logit scores
"""
aA = self.transfNodeLogit.transform( [edge.A for edge in lEdge] )
aB = self.transfNodeLogit.transform( [edge.B for edge in lEdge] )
a = np.hstack([aA, aB])
del aA, aB
assert a.shape == (len(lEdge), 2 * 3 * self.nbClass) #src / target nodes, same_page/cross_page_neighbors/class
return a
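# Hedged usage sketch (names are illustrative; assumes a TransformerLogit instance
# `node_logit` already fitted on the training nodes, and Edge objects exposing .A/.B
# as used above):
#
#   edge_transformer = EdgeTransformerLogit(nbClass=5, ndTrnsfLogit=node_logit)
#   X_edges = edge_transformer.transform(lEdge)   # shape (len(lEdge), 2*3*5)
#
# Each row is simply the source-node feature vector hstacked with the target-node one.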
| bsd-3-clause |
jenhantao/nuclearReceptorOverlap | makeSummaryPlots.py | 1 | 6388 | # given a group stats file, produce simple plots demonstrating the distribution of scores across all nodes
# inputs: group stats file, output file name
# outputs: plots visualizing the group stats
import sys
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import collections
import scipy
from scipy import stats
def createPeakSummaryPlots(inputPath,outPath):
# read in inputs
with open(inputPath) as f:
data = f.readlines()
factorFrequencyHash = {} # key - each factor; value: number of times each factor appears in a peak
groupPeaksHash = {}# key - each group name, value: number of merged regions that appear in the peak
groupComponentsHash = {}
for line in data[2:]:
if "###" in line:
break
tokens = line.strip().split("\t")
group = tokens[0]
#groupPeaksHash[group] = int(tokens[2])
groupPeaksHash[group] = int(tokens[3]) # use total instead of peaks unique to the group
groupTokens = set(group[1:-1].split(", "))
groupComponentsHash[group] = groupTokens
# add in individual factors
for token in groupTokens:
if token in factorFrequencyHash:
factorFrequencyHash[token] += 1
else:
factorFrequencyHash[token] = 1
factorIndexHash = {}
counter = 1
for factor in sorted(list(factorFrequencyHash.keys())):
factorIndexHash[factor] = str(counter)
counter += 1
# plot the log number of peaks per group/factor
#plt.hist(groupPeaksHash.values())
# fit a normal distribution
sortedValues = sorted(map(math.log,groupPeaksHash.values()))
fit = stats.norm.pdf(sortedValues, np.mean(sortedValues), np.std(sortedValues))
plt.plot(sortedValues,fit,'-o')
plt.hist(map(math.log, map(float,groupPeaksHash.values())),normed=True)
plt.xlabel("Number of Peaks (ln)")
plt.ylabel("Frequency")
plt.title("All Factors/Groups VS Peaks")
plt.savefig(outPath+"/allFactorsGroups_vs_mergedRegions_log.png")
plt.close()
# plot the number of peaks per group/factor
from scipy import cluster
sortedValues = sorted(groupPeaksHash.values())
centroid = sorted(cluster.vq.kmeans(np.array(sortedValues), 3)[0])[1]
std = np.std(sortedValues)
mean = np.mean(sortedValues)
sortedValues = [x for x in sortedValues if abs(x-centroid)<=std/2.0]
# fit a poisson distribution
param = np.mean(sortedValues)
fit = stats.poisson.pmf(sortedValues, param)
plt.plot(sortedValues,fit,'-o')
plt.hist(sortedValues, normed=True, bins=10)
plt.xlabel("Number of Peaks")
plt.ylabel("Frequency")
plt.title("All Factors/Groups VS Peaks")
plt.savefig(outPath+"/allFactorsGroups_vs_mergedRegions.png")
plt.close()
# plot the number of times a factor appears in a group
factorList = []
for factor in factorFrequencyHash:
factorList.append((factor, factorFrequencyHash[factor]))
factorList.sort(key=lambda x: (x[1],x[0]))
factorLabels = [x[0] for x in factorList]
factorFrequencies = [x[1] for x in factorList]
plt.bar(range(len(factorFrequencies)),factorFrequencies)
plt.ylabel("Number of Groups")
plt.xlabel("Factor")
plt.xticks(range(len(factorLabels)), factorLabels, rotation=90,fontsize=8)
plt.title("Factors vs Number of Ocurrences in a Group")
plt.savefig(outPath+"/factors_vs_groupOcurrence.png")
plt.close()
# plot the size of a group vs the number of merged regions
groupList = []
for group in groupPeaksHash:
groupList.append((group, len(groupComponentsHash[group]), groupPeaksHash[group]))
sizes = [x[1] for x in groupList]
numPeaks = [x[2] for x in groupList]
numPeaks = map(math.log,map(float,numPeaks))
plt.scatter(sizes,numPeaks)
plt.xlabel("Number of Factors in Group")
plt.ylabel("Number of Peaks (ln)")
plt.title("Number of Factors in Group VS Number of Peaks")
plt.savefig(outPath+"/numFactors_vs_numPeaks.png")
plt.close()
def createMotifSummaryPlots(inputPath, outputPath):
# read in input
with open(inputPath) as f:
data = f.readlines()
factorFrequencyHash = {} # key - each factor; value: number of times each factor appears in a peak
groupPeaksHash = {}# key - each group name, value: number of merged regions that appear in the peak
groupComponentsHash = {}
start = 3
for line in data[1:]:
if not "###" in line:
start += 1
else:
break
targetFracs = []
backgroundFracs = []
p_vals = []
std_targetFracs = []
std_backgroundFracs = []
std_p_vals = []
numMotifs = []
groupNumbers = []
for line in data[start:]:
tokens = line.strip().split("\t")
groupNumbers.append(tokens[0])
targetFracs.append(float(tokens[1]))
backgroundFracs.append(float(tokens[2]))
p_vals.append(float(tokens[3]))
std_targetFracs.append(float(tokens[4]))
std_backgroundFracs.append(float(tokens[5]))
std_p_vals.append(float(tokens[6]))
numMotifs.append(int(tokens[7]))
# plot the average target fractions per group
plt.bar(range(len(targetFracs)),targetFracs)
plt.hlines(float(math.fsum(targetFracs))/float(len(targetFracs)),0,len(targetFracs))
plt.xlabel("Group")
plt.ylabel("Target Fraction")
plt.title("Average Motif Target Fraction per Group")
plt.savefig(outputPath+"averageMotifTargetFraction.png")
plt.close()
# plot the average background fractions per group
plt.bar(range(len(backgroundFracs)),backgroundFracs)
plt.hlines(float(math.fsum(backgroundFracs))/float(len(backgroundFracs)),0, len(backgroundFracs))
plt.xlabel("Group")
plt.ylabel("Background Fraction")
plt.title("Average Motif Background Fraction per Group")
plt.savefig(outputPath+"averageMotifBackgroundFraction.png")
plt.close()
# plot the average p-value per group
plt.bar(range(len(p_vals)),p_vals)
plt.hlines(float(math.fsum(p_vals))/float(len(p_vals)),0, len(p_vals))
plt.xlabel("Group")
plt.ylabel("P-value")
plt.title("Average Motif p-value per Group")
plt.savefig(outputPath+"averageMotifPVals.png")
plt.close()
# plot the average number of motifs per group
plt.bar(range(len(numMotifs)),numMotifs)
plt.hlines(float(math.fsum(numMotifs))/float(len(numMotifs)),0, len(numMotifs))
plt.xlabel("Group")
plt.ylabel("Number of Motifs")
plt.title("Average Number of Motifs per Group")
plt.savefig(outputPath+"averageNumberofMotifs.png")
plt.close()
if __name__ == "__main__":
createPeakSummaryPlots(sys.argv[1],sys.argv[2])
#createMotifSummaryPlots(sys.argv[1],sys.argv[2])
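# Hedged usage sketch (paths are placeholders): the script is driven from the command line, e.g.
#   python makeSummaryPlots.py groupStats.txt ./plots
# where argv[1] is the group stats file parsed above (tab-separated lines in "###"-delimited
# sections) and argv[2] is the directory that receives the generated .png files.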
| mit |
temasek/android_external_chromium_org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tests/test_contour.py | 6 | 8038 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import datetime
import numpy as np
from matplotlib import mlab
from matplotlib.testing.decorators import cleanup, image_comparison
from matplotlib import pyplot as plt
from nose.tools import assert_equal, assert_raises
import warnings
import re
@cleanup
def test_contour_shape_1d_valid():
x = np.arange(10)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(x, y, z)
@cleanup
def test_contour_shape_2d_valid():
x = np.arange(10)
y = np.arange(9)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(xg, yg, z)
@cleanup
def test_contour_shape_mismatch_1():
x = np.arange(9)
y = np.arange(9)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of x must be number of columns in z.'
@cleanup
def test_contour_shape_mismatch_2():
x = np.arange(10)
y = np.arange(10)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Length of y must be number of rows in z.'
@cleanup
def test_contour_shape_mismatch_3():
x = np.arange(10)
y = np.arange(10)
xg, yg = np.meshgrid(x, y)
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(xg, y, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
try:
ax.contour(x, yg, z)
except TypeError as exc:
assert exc.args[0] == 'Number of dimensions of x and y should match.'
@cleanup
def test_contour_shape_mismatch_4():
g = np.random.random((9, 10))
b = np.random.random((9, 9))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(b, g, z)
except TypeError as exc:
print(exc.args[0])
assert re.match(
r'Shape of x does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
try:
ax.contour(g, b, z)
except TypeError as exc:
assert re.match(
r'Shape of y does not match that of z: ' +
r'found \(9L?, 9L?\) instead of \(9L?, 10L?\)\.',
exc.args[0]) is not None
@cleanup
def test_contour_shape_invalid_1():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((9, 10))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Inputs x and y must be 1D or 2D.'
@cleanup
def test_contour_shape_invalid_2():
x = np.random.random((3, 3, 3))
y = np.random.random((3, 3, 3))
z = np.random.random((3, 3, 3))
fig = plt.figure()
ax = fig.add_subplot(111)
try:
ax.contour(x, y, z)
except TypeError as exc:
assert exc.args[0] == 'Input z must be a 2D array.'
@image_comparison(baseline_images=['contour_manual_labels'])
def test_contour_manual_labels():
x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10))
z = np.max(np.dstack([abs(x), abs(y)]), 2)
plt.figure(figsize=(6, 2))
cs = plt.contour(x, y, z)
pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)])
plt.clabel(cs, manual=pts)
@image_comparison(baseline_images=['contour_labels_size_color'],
extensions=['png'], remove_text=True)
def test_contour_labels_size_color():
x, y = np.meshgrid(np.arange(0, 10), np.arange(0, 10))
z = np.max(np.dstack([abs(x), abs(y)]), 2)
plt.figure(figsize=(6, 2))
cs = plt.contour(x, y, z)
pts = np.array([(1.5, 3.0), (1.5, 4.4), (1.5, 6.0)])
plt.clabel(cs, manual=pts, fontsize='small', colors=('r', 'g'))
@image_comparison(baseline_images=['contour_manual_colors_and_levels'],
extensions=['png'], remove_text=True)
def test_given_colors_levels_and_extends():
_, axes = plt.subplots(2, 4)
data = np.arange(12).reshape(3, 4)
colors = ['red', 'yellow', 'pink', 'blue', 'black']
levels = [2, 4, 8, 10]
for i, ax in enumerate(axes.flatten()):
plt.sca(ax)
filled = i % 2 == 0.
extend = ['neither', 'min', 'max', 'both'][i // 2]
if filled:
last_color = -1 if extend in ['min', 'max'] else None
plt.contourf(data, colors=colors[:last_color], levels=levels,
extend=extend)
else:
last_level = -1 if extend == 'both' else None
plt.contour(data, colors=colors, levels=levels[:last_level],
extend=extend)
plt.colorbar()
@image_comparison(baseline_images=['contour_datetime_axis'],
extensions=['png'], remove_text=False)
def test_contour_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(20)])
y = np.arange(20)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.contour(x, y, z)
plt.subplot(222)
plt.contourf(x, y, z)
x = np.repeat(x[np.newaxis], 20, axis=0)
y = np.repeat(y[:, np.newaxis], 20, axis=1)
plt.subplot(223)
plt.contour(x, y, z)
plt.subplot(224)
plt.contourf(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
@image_comparison(baseline_images=['contour_test_label_transforms'],
extensions=['png'], remove_text=True)
def test_labels():
# Adapted from pylab_examples example code: contour_demo.py
# see issues #2475, #2843, and #2818 for explanation
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
fig, ax = plt.subplots(1, 1)
CS = ax.contour(X, Y, Z)
disp_units = [(216, 177), (359, 290), (521, 406)]
data_units = [(-2, .5), (0, -1.5), (2.8, 1)]
CS.clabel()
for x, y in data_units:
CS.add_label_near(x, y, inline=True, transform=None)
for x, y in disp_units:
CS.add_label_near(x, y, inline=True, transform=False)
@image_comparison(baseline_images=['contour_corner_mask_False',
'contour_corner_mask_True'],
extensions=['png'], remove_text=True)
def test_corner_mask():
n = 60
mask_level = 0.95
noise_amp = 1.0
np.random.seed([1])
x, y = np.meshgrid(np.linspace(0, 2.0, n), np.linspace(0, 2.0, n))
z = np.cos(7*x)*np.sin(8*y) + noise_amp*np.random.rand(n, n)
mask = np.where(np.random.rand(n, n) >= mask_level, True, False)
z = np.ma.array(z, mask=mask)
for corner_mask in [False, True]:
fig = plt.figure()
plt.contourf(z, corner_mask=corner_mask)
@cleanup
def test_contourf_decreasing_levels():
# github issue 5477.
z = [[0.1, 0.3], [0.5, 0.7]]
plt.figure()
assert_raises(ValueError, plt.contourf, z, [1.0, 0.0])
# Legacy contouring algorithm gives a warning rather than raising an error,
# plus a DeprecationWarning.
with warnings.catch_warnings(record=True) as w:
plt.contourf(z, [1.0, 0.0], corner_mask='legacy')
assert_equal(len(w), 2)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
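# Hedged summary sketch of the shape contract exercised by the tests above: with 1D
# coordinate arrays, len(x) must equal z.shape[1] and len(y) must equal z.shape[0].
#
#   z = np.random.random((9, 10))
#   plt.contour(np.arange(10), np.arange(9), z)   # ok: 10 columns, 9 rows
#   plt.contour(np.arange(9), np.arange(9), z)    # TypeError, as asserted above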
| mit |
nmartensen/pandas | pandas/core/reshape/tile.py | 4 | 13776 | """
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
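# Hedged usage sketch of the `duplicates` behaviour documented above (data chosen only to
# force colliding quantile edges):
#
#   >>> s = pd.Series([0, 0, 0, 0, 1, 2])
#   >>> pd.qcut(s, 4)                      # raises ValueError: Bin edges must be unique
#   >>> pd.qcut(s, 4, duplicates='drop')   # collapses the repeated edges instead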
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a dtype compatible with dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
x = np.asarray(x)
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
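# Hedged worked example of the two helpers above (values illustrative): for
# bins = [0.1234, 0.125, 0.9] and base_precision = 3, _round_frac gives
# [0.123, 0.125, 0.9], which are already unique, so _infer_precision returns 3;
# had two rounded edges collided, it would retry with 4, 5, ... before falling
# back to base_precision.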
| bsd-3-clause |
fhedberg/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 32 | 14968 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
# Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
# correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
# Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_TYPE to 10 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
# 8) Run the analysis by entering 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
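# Worked example (illustrative numbers only) of the scale-factor update computed in run()
# below: if the linear fit of flow rate against gyro rate has slope 0.95 and the existing
# FLOW_FXSCALER is 0, the recommendation is
#   int(1000 * ((1 + 0.001 * 0) / 0.95 - 1)) = 52
# i.e. the parameter is the per-mille correction that would bring the fitted slope to 1.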
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
# taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
severinson/coded-computing-tools | tests/simulationtests.py | 2 | 4673 | ############################################################################
# Copyright 2016 Albin Severinson #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
'''This module contains tests of the simulation module.
'''
import os
import math
import unittest
import tempfile
import pandas as pd
import simulation
from functools import partial
from model import SystemParameters
from solvers.heuristicsolver import HeuristicSolver
from evaluation.binsearch import SampleEvaluator
class EvaluationTests(unittest.TestCase):
'''Tests of the simulation module.'''
def verify_result(self, result, correct_result, delta=0.1):
'''Check the results against known correct results.
Args:
result: Measured result.
correct_result: Dict with correct results.
delta: Correct result must be within a delta fraction of the
measured result.
'''
for key, value in correct_result.items():
if value == math.inf:
self.assertAlmostEqual(result[key].mean(), value, places=1,
msg='key={}, value={}'.format(str(key), str(value)))
else:
self.assertAlmostEqual(result[key].mean(), value, delta=value*delta,
msg='key={}, value={}'.format(str(key), str(value)))
def verify_solver(self, solver, parameters, correct_results):
'''Check the results from evaluating the assignment produced by some
solver against known correct results.
Args:
solver: Assignment solver.
parameters: System parameters.
correct_results: List of dicts with correct results.
'''
evaluator = SampleEvaluator(num_samples=1000)
for par, correct_result in zip(parameters, correct_results):
assignment = solver.solve(par)
self.assertTrue(assignment.is_valid())
result = evaluator.evaluate(par, assignment)
self.verify_result(result, correct_result)
return
def test_simulation(self):
'''Test basic functionality.'''
parameters = SystemParameters(rows_per_batch=5, num_servers=10, q=9, num_outputs=9,
server_storage=1/3, num_partitions=5)
correct = {'servers': 9, 'batches': 324, 'delay': 25.460714285714285/9,
'unicast_load_1': 720/540/9, 'multicast_load_1': 840/540/9,
'unicast_load_2': 0, 'multicast_load_2': 1470/540/9}
solver = HeuristicSolver()
evaluator = SampleEvaluator(num_samples=1000)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, parameters.identifier() + '.csv')
dataframe = simulation.simulate(
parameters,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
self.verify_result(dataframe, correct)
simulate_fun = partial(
simulation.simulate,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
dataframe = simulation.simulate_parameter_list(
parameter_list=[parameters],
simulate_fun=simulate_fun,
map_complexity_fun=lambda x: 1,
encode_delay_fun=lambda x: 0,
reduce_delay_fun=lambda x: 0,
)
self.verify_result(dataframe, correct)
return
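# Hedged sketch (parameter values and expected results are placeholders and must satisfy the
# model's own validity checks): inside another test method, verify_solver above could be
# pointed at a small parameter sweep, e.g.
#
#   pars = [SystemParameters(rows_per_batch=5, num_servers=10, q=9, num_outputs=9,
#                            server_storage=1/3, num_partitions=p) for p in (5, 10)]
#   correct = [{'servers': 9}, {'servers': 9}]   # known-good values would go here
#   self.verify_solver(HeuristicSolver(), pars, correct)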
| apache-2.0 |
roxyboy/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
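# Note (descriptive comment): the sparse-target tests above check that
# DummyClassifier, when fitted on a scipy.sparse multioutput target, returns
# sparse predictions for the constant, stratified and most_frequent/prior
# strategies, while the uniform strategy only warns that sparse output would
# not save memory.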
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
LiuVII/Self-driving-RC-car | train3.py | 1 | 6148 | """
With admiration for and inspiration from:
https://github.com/dolaameng/Udacity-SDC_Behavior-Cloning/
https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/
https://chatbotslife.com/using-augmentation-to-mimic-human-driving-496b569760a9
https://www.reddit.com/r/MachineLearning/comments/5qbjz7/p_an_autonomous_vehicle_steering_model_in_99/dcyphps/
https://medium.com/@harvitronix/training-a-deep-learning-model-to-steer-a-car-in-99-lines-of-code-ba94e0456e6a
"""
import os
import csv
import random
import re
import numpy as np
import argparse
from keras.models import load_model, Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.preprocessing.image import img_to_array, load_img, flip_axis, random_shift
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from PIL import Image
import PIL
from PIL import ImageOps
from skimage.exposure import equalize_adapthist
oshapeX = 640
oshapeY = 240
NUM_CLASSES = 3
shapeX = 320
shapeY = 120
cshapeY = 120
def model(load, shape, tr_model=None):
"""Return a model from file or to train on."""
if load and tr_model: return load_model(tr_model)
# conv5x5_l, conv3x3_l, dense_layers = [16, 24], [36, 48], [512, 128, 16]
conv3x3_l, dense_layers = [24, 32, 40, 48], [512, 64, 16]
model = Sequential()
model.add(Conv2D(16, (5, 5), activation='elu', input_shape=shape))
model.add(MaxPooling2D())
for i in range(len(conv3x3_l)):
model.add(Conv2D(conv3x3_l[i], (3, 3), activation='elu'))
if i < len(conv3x3_l) - 1:
model.add(MaxPooling2D())
model.add(Flatten())
for dl in dense_layers:
model.add(Dense(dl, activation='elu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
return model
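# Architecture note (descriptive comment): the network above is one 5x5 ELU
# convolution (16 filters) followed by four 3x3 ELU convolutions
# (24/32/40/48 filters) with max-pooling between them, then three dense ELU
# layers (512/64/16) each followed by 0.5 dropout, and a softmax over the
# NUM_CLASSES steering commands.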
def get_X_y(data_file):
"""Read the log file and turn it into X/y pairs. Add an offset to left images, remove from right images."""
X, y = [], []
with open(data_file) as fin:
reader = csv.reader(fin)
next(reader, None)
for img, command in reader:
X.append(img.strip())
y.append(int(command))
return X, to_categorical(y, num_classes=NUM_CLASSES)
def process_image(file_name, shape=(shapeY, shapeX)):
"""Process and augment an image."""
folder_name = ""
image = load_img(file_name, target_size=shape)
aimage = img_to_array(image)
aimage = aimage.astype(np.float32) / 255.
aimage = aimage - 0.5
return aimage
def _generator(batch_size, classes, X, y):
"""Generate batches of training data forever."""
while 1:
batch_X, batch_y = [], []
for i in range(batch_size):
class_i = random.randint(0, NUM_CLASSES - 1)
sample_index = random.choice(classes[class_i])
command = y[sample_index]
image = process_image(img_dir + X[sample_index])
batch_X.append(image)
batch_y.append(command)
yield np.array(batch_X), np.array(batch_y)
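# Sampling note (descriptive comment): each batch element is drawn by first
# picking one of the NUM_CLASSES command classes uniformly at random and then
# picking a sample index from that class, so batches stay roughly
# class-balanced even when the driving log is dominated by one command.
# The generator relies on the module-level `img_dir` set in __main__.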
def train(model_name, val_split, epoch_num, step_num):
"""Load our network and our data, fit the model, save it."""
if model_name:
net = model(load=True, shape=(cshapeY, shapeX, 3), tr_model=model_name)
else:
net = model(load=False, shape=(cshapeY, shapeX, 3))
net.summary()
X, y = get_X_y(data_dir + args.img_dir + '_log.csv')
# print("X\n", X[:10], "y\n", y[:10])
Xtr, Xval, ytr, yval = train_test_split(X, y, test_size=val_split, random_state=random.randint(0, 100))
tr_classes = [[] for _ in range(NUM_CLASSES)]
for i in range(len(ytr)):
for j in range(NUM_CLASSES):
if ytr[i][j]:
tr_classes[j].append(i)
val_classes = [[] for _ in range(NUM_CLASSES)]
for i in range(len(yval)):
for j in range(NUM_CLASSES):
if yval[i][j]:
val_classes[j].append(i)
net.fit_generator(_generator(batch_size, tr_classes, Xtr, ytr),\
validation_data=_generator(batch_size, val_classes, Xval, yval),\
validation_steps=max(len(Xval) // batch_size, 1), steps_per_epoch=1, epochs=1)
net.fit_generator(_generator(batch_size, tr_classes, Xtr, ytr),\
validation_data=_generator(batch_size, val_classes, Xval, yval),\
validation_steps=max(len(Xval) // batch_size, 1), steps_per_epoch=step_num, epochs=epoch_num)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
net.save(model_dir + args.img_dir + "_" + str(step_num) + "-" + str(epoch_num) + "_" + str(batch_size) + "_" \
+ str(shapeX) + "x" + str(shapeY) + '.h5')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Trainer')
parser.add_argument(
'img_dir',
type=str,
help='Name of the training set folder. Default: ts_0',
default="ts_0"
)
parser.add_argument(
'steps',
type=int,
help='Training steps. Default: 200',
default=200
)
parser.add_argument(
'-batch',
type=int,
help='Batch size. Default: 64',
default=64
)
parser.add_argument(
'-model',
type=str,
default='',
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'-valid',
type=float,
default=0.15,
help='Validation fraction of data. Default: 0.15'
)
parser.add_argument(
'-epoch',
type=int,
default=1,
help='Number of training epochs. Default: 1'
)
args = parser.parse_args()
batch_size = args.batch
data_dir = "./model_data/"
pos = args.img_dir.find("_s_")
if pos > 0:
img_dir = "./data_sets/" + args.img_dir[:pos] + "/" + "data/"
else:
img_dir = "./data_sets/" + args.img_dir + "/" + "data/"
model_dir = "./models/"
train(args.model, args.valid, args.epoch, args.steps)
| mit |
shahankhatch/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
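# Optional extension (not part of the original example): gather the ranking
# printed above into (feature index, importance) pairs, which can be easier
# to reuse programmatically than the console output.
ranking = [(int(i), float(importances[i])) for i in indices]
print("Top 3 features by importance:", ranking[:3])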
| bsd-3-clause |
zcold/matplotlib-3d-objects | shape.py | 1 | 2161 | # -*- coding: utf-8 -*-
class Shape(object):
def __init__(self, ax, x = 0., y = 0., z = 0., a = 1., **kwargs) :
self.ax = ax
self.position = [x, y, z, a]
self.surfaces = []
self.current_position = [x, y, z, a]
@property
def x(self):
return self.position[0]
@x.setter
def x(self, value):
self.position[0] = value
@property
def y(self):
return self.position[1]
@y.setter
def y(self, value):
self.position[1] = value
@property
def z(self):
return self.position[2]
@z.setter
def z(self, value):
self.position[2] = value
@property
def a(self):
return self.position[3]
@a.setter
def a(self, value):
self.position[3] = value
@property
def alpha(self):
return self.position[3]
@alpha.setter
def alpha(self, value):
self.position[3] = value
_dimension_dict = {'x': 0, 'y': 1, 'z': 2, 'a': 3, 'alpha': 3}
def _modify_dimension(self, new_value, dimension = 0) :
if dimension not in [0, 1, 2, 3] :
dimension = Shape._dimension_dict[dimension.lower()]
diff = new_value - self.position[dimension]
for surface in self.surfaces :
for i, __ in enumerate(surface._vec[dimension]) :
surface._vec[dimension][i] += diff
self.position[dimension] = new_value
def modify_x(self, new_x) :
self._modify_dimension(new_x, dimension = 0)
def modify_y(self, new_y) :
self._modify_dimension(new_y, dimension = 1)
def modify_z(self, new_z) :
self._modify_dimension(new_z, dimension = 2)
def modify_alpha(self, new_alpha) :
self._modify_dimension(new_alpha, dimension = 3)
def modify_position(self, *position) :
self.modify_x(position[0])
self.modify_y(position[1])
self.modify_z(position[2])
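# Usage note (descriptive comment): a Shape keeps its absolute position in
# `self.position`; moving it along an axis simply shifts every vertex stored
# in each surface's `_vec` array for that axis by the delta between the new
# and the old coordinate, as done in `_modify_dimension` above.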
if __name__ == '__main__' :
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes3D(fig)
ax.view_init(elev = -80., azim = 90)
plt.xlabel('x')
plt.ylabel('y')
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax.set_zlim(-10, 10)
ax.set_zlabel('z')
ax.set_zticks([])
s = Shape(ax, x = 0, y = 1, z = 1)
s.modify_x(2)
plt.show()
| mit |
soravux/deap | deap/gp.py | 9 | 46662 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`gp` module provides the methods and classes to perform
Genetic Programming with DEAP. It essentially contains the classes to
build a Genetic Program Tree, and the functions to evaluate it.
This module supports both strongly and loosely typed GP.
"""
import copy
import math
import random
import re
import sys
import warnings
from collections import defaultdict, deque
from functools import partial, wraps
from inspect import isclass
from operator import eq, lt
import tools # Needed by HARM-GP
######################################
# GP Data structure #
######################################
# Define the name of type for any types.
__type__ = object
class PrimitiveTree(list):
"""Tree specifically formatted for optimization of genetic
programming operations. The tree is represented with a
list where the nodes are appended in a depth-first order.
The nodes appended to the tree are required to
have an attribute *arity* which defines the arity of the
primitive. An arity of 0 is expected from terminals nodes.
"""
def __init__(self, content):
list.__init__(self, content)
def __deepcopy__(self, memo):
new = self.__class__(self)
new.__dict__.update(copy.deepcopy(self.__dict__, memo))
return new
def __setitem__(self, key, val):
# Check for most common errors
# Does NOT check for STGP constraints
if isinstance(key, slice):
if key.start >= len(self):
raise IndexError("Invalid slice object (try to assign a %s"
" in a tree of size %d). Even if this is allowed by the"
" list object slice setter, this should not be done in"
" the PrimitiveTree context, as this may lead to an"
" unpredictable behavior for searchSubtree or evaluate."
% (key, len(self)))
total = val[0].arity
for node in val[1:]:
total += node.arity - 1
if total != 0:
raise ValueError("Invalid slice assignation : insertion of"
" an incomplete subtree is not allowed in PrimitiveTree."
" A tree is defined as incomplete when some nodes cannot"
" be mapped to any position in the tree, considering the"
" primitives' arity. For instance, the tree [sub, 4, 5,"
" 6] is incomplete if the arity of sub is 2, because it"
" would produce an orphan node (the 6).")
elif val.arity != self[key].arity:
raise ValueError("Invalid node replacement with a node of a"
" different arity.")
list.__setitem__(self, key, val)
def __str__(self):
"""Return the expression in a human readable string.
"""
string = ""
stack = []
for node in self:
stack.append((node, []))
while len(stack[-1][1]) == stack[-1][0].arity:
prim, args = stack.pop()
string = prim.format(*args)
if len(stack) == 0:
break # If stack is empty, all nodes should have been seen
stack[-1][1].append(string)
return string
@classmethod
def from_string(cls, string, pset):
"""Try to convert a string expression into a PrimitiveTree given a
PrimitiveSet *pset*. The primitive set needs to contain every primitive
present in the expression.
:param string: String representation of a Python expression.
:param pset: Primitive set from which primitives are selected.
:returns: PrimitiveTree populated with the deserialized primitives.
"""
tokens = re.split("[ \t\n\r\f\v(),]", string)
expr = []
ret_types = deque()
for token in tokens:
if token == '':
continue
if len(ret_types) != 0:
type_ = ret_types.popleft()
else:
type_ = None
if token in pset.mapping:
primitive = pset.mapping[token]
if type_ is not None and not issubclass(primitive.ret, type_):
raise TypeError("Primitive {} return type {} does not "
"match the expected one: {}."
.format(primitive, primitive.ret, type_))
expr.append(primitive)
if isinstance(primitive, Primitive):
ret_types.extendleft(reversed(primitive.args))
else:
try:
token = eval(token)
except NameError:
raise TypeError("Unable to evaluate terminal: {}.".format(token))
if type_ is None:
type_ = type(token)
if not issubclass(type(token), type_):
raise TypeError("Terminal {} type {} does not "
"match the expected one: {}."
.format(token, type(token), type_))
expr.append(Terminal(token, False, type_))
return cls(expr)
@property
def height(self):
"""Return the height of the tree, or the depth of the
deepest node.
"""
stack = [0]
max_depth = 0
for elem in self:
depth = stack.pop()
max_depth = max(max_depth, depth)
stack.extend([depth + 1] * elem.arity)
return max_depth
@property
def root(self):
"""Root of the tree, the element 0 of the list.
"""
return self[0]
def searchSubtree(self, begin):
"""Return a slice object that corresponds to the
range of values that defines the subtree which has the
element with index *begin* as its root.
"""
end = begin + 1
total = self[begin].arity
while total > 0:
total += self[end].arity - 1
end += 1
return slice(begin, end)
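# Illustration (a sketch, not part of the library): for the prefix expression
# mul(add(x, y), 3) the underlying list is [mul, add, x, y, 3]. Starting at
# index 1, searchSubtree keeps a running count of still-missing arguments
# (adding arity - 1 per node) and stops when it reaches zero, returning
# slice(1, 4), i.e. the subtree [add, x, y].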
class Primitive(object):
"""Class that encapsulates a primitive and when called with arguments it
returns the Python code to call the primitive with the arguments.
>>> pr = Primitive("mul", (int, int), int)
>>> pr.format(1, 2)
'mul(1, 2)'
"""
__slots__ = ('name', 'arity', 'args', 'ret', 'seq')
def __init__(self, name, args, ret):
self.name = name
self.arity = len(args)
self.args = args
self.ret = ret
args = ", ".join(map("{{{0}}}".format, range(self.arity)))
self.seq = "{name}({args})".format(name=self.name, args=args)
def format(self, *args):
return self.seq.format(*args)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Terminal(object):
"""Class that encapsulates terminal primitive in expression. Terminals can
be values or 0-arity functions.
"""
__slots__ = ('name', 'value', 'ret', 'conv_fct')
def __init__(self, terminal, symbolic, ret):
self.ret = ret
self.value = terminal
self.name = str(terminal)
self.conv_fct = str if symbolic else repr
@property
def arity(self):
return 0
def format(self):
return self.conv_fct(self.value)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Ephemeral(Terminal):
"""Class that encapsulates a terminal which value is set when the
object is created. To mutate the value, a new object has to be
generated. This is an abstract base class. When subclassing, a
staticmethod 'func' must be defined.
"""
def __init__(self):
Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret)
@staticmethod
def func():
"""Return a random value used to define the ephemeral state.
"""
raise NotImplementedError
class PrimitiveSetTyped(object):
"""Class that contains the primitives that can be used to solve a
Strongly Typed GP problem. The set also defines the return type of the
evolved function, and the type and number of its input arguments.
"""
def __init__(self, name, in_types, ret_type, prefix="ARG"):
self.terminals = defaultdict(list)
self.primitives = defaultdict(list)
self.arguments = []
# setting "__builtins__" to None avoid the context
# being polluted by builtins function when evaluating
# GP expression.
self.context = {"__builtins__": None}
self.mapping = dict()
self.terms_count = 0
self.prims_count = 0
self.name = name
self.ret = ret_type
self.ins = in_types
for i, type_ in enumerate(in_types):
arg_str = "{prefix}{index}".format(prefix=prefix, index=i)
self.arguments.append(arg_str)
term = Terminal(arg_str, True, type_)
self._add(term)
self.terms_count += 1
def renameArguments(self, **kargs):
"""Rename function arguments with new names from *kargs*.
"""
for i, old_name in enumerate(self.arguments):
if old_name in kargs:
new_name = kargs[old_name]
self.arguments[i] = new_name
self.mapping[new_name] = self.mapping[old_name]
self.mapping[new_name].value = new_name
del self.mapping[old_name]
def _add(self, prim):
def addType(dict_, ret_type):
if not ret_type in dict_:
new_list = []
for type_, list_ in dict_.items():
if issubclass(type_, ret_type):
for item in list_:
if not item in new_list:
new_list.append(item)
dict_[ret_type] = new_list
addType(self.primitives, prim.ret)
addType(self.terminals, prim.ret)
self.mapping[prim.name] = prim
if isinstance(prim, Primitive):
for type_ in prim.args:
addType(self.primitives, type_)
addType(self.terminals, type_)
dict_ = self.primitives
else:
dict_ = self.terminals
for type_ in dict_:
if issubclass(prim.ret, type_):
dict_[type_].append(prim)
def addPrimitive(self, primitive, in_types, ret_type, name=None):
"""Add a primitive to the set.
:param primitive: callable object or a function.
:param in_types: list of the primitive's argument types.
:param ret_type: type returned by the primitive.
:param name: alternative name for the primitive instead
of its __name__ attribute.
"""
if name is None:
name = primitive.__name__
prim = Primitive(name, in_types, ret_type)
assert name not in self.context or \
self.context[name] is primitive, \
"Primitives are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second '%s' primitive." % (name,)
self._add(prim)
self.context[prim.name] = primitive
self.prims_count += 1
def addTerminal(self, terminal, ret_type, name=None):
"""Add a terminal to the set. Terminals can be named
using the optional *name* argument. This should be
used: to define a named constant (e.g. pi); to speed up
evaluation when the object is expensive to build; when
the object does not have a __repr__ function that returns
the code to build the object; or when the object class is
not a Python built-in.
:param terminal: Object, or a function with no arguments.
:param ret_type: Type of the terminal.
:param name: defines the name of the terminal in the expression.
"""
symbolic = False
if name is None and callable(terminal):
name = terminal.__name__
assert name not in self.context, \
"Terminals are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second %s terminal." % (name,)
if name is not None:
self.context[name] = terminal
terminal = name
symbolic = True
elif terminal in (True, False):
# To support True and False terminals with Python 2.
self.context[str(terminal)] = terminal
prim = Terminal(terminal, symbolic, ret_type)
self._add(prim)
self.terms_count += 1
def addEphemeralConstant(self, name, ephemeral, ret_type):
"""Add an ephemeral constant to the set. An ephemeral constant
is a no argument function that returns a random value. The value
of the constant is constant for a Tree, but may differ from one
Tree to another.
:param name: name used to refer to this ephemeral type.
:param ephemeral: function with no arguments returning a random value.
:param ret_type: type of the object returned by *ephemeral*.
"""
module_gp = globals()
if not name in module_gp:
class_ = type(name, (Ephemeral,), {'func': staticmethod(ephemeral),
'ret': ret_type})
module_gp[name] = class_
else:
class_ = module_gp[name]
if issubclass(class_, Ephemeral):
if class_.func is not ephemeral:
raise Exception("Ephemerals with different functions should "
"be named differently, even between psets.")
elif class_.ret is not ret_type:
raise Exception("Ephemerals with the same name and function "
"should have the same type, even between psets.")
else:
raise Exception("Ephemerals should be named differently "
"than classes defined in the gp module.")
self._add(class_)
self.terms_count += 1
def addADF(self, adfset):
"""Add an Automatically Defined Function (ADF) to the set.
:param adfset: PrimitiveSetTyped containing the primitives with which
the ADF can be built.
"""
prim = Primitive(adfset.name, adfset.ins, adfset.ret)
self._add(prim)
self.prims_count += 1
@property
def terminalRatio(self):
"""Return the ratio of the number of terminals on the number of all
kind of primitives.
"""
return self.terms_count / float(self.terms_count + self.prims_count)
class PrimitiveSet(PrimitiveSetTyped):
"""Class same as :class:`~deap.gp.PrimitiveSetTyped`, except there is no
definition of type.
"""
def __init__(self, name, arity, prefix="ARG"):
args = [__type__] * arity
PrimitiveSetTyped.__init__(self, name, args, __type__, prefix)
def addPrimitive(self, primitive, arity, name=None):
"""Add primitive *primitive* with arity *arity* to the set.
If a *name* is provided, it will replace the __name__ attribute
used to represent/identify the primitive.
"""
assert arity > 0, "arity should be >= 1"
args = [__type__] * arity
PrimitiveSetTyped.addPrimitive(self, primitive, args, __type__, name)
def addTerminal(self, terminal, name=None):
"""Add a terminal to the set."""
PrimitiveSetTyped.addTerminal(self, terminal, __type__, name)
def addEphemeralConstant(self, name, ephemeral):
"""Add an ephemeral constant to the set."""
PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral, __type__)
######################################
# GP Tree compilation functions #
######################################
def compile(expr, pset):
"""Compile the expression *expr*.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param pset: Primitive set against which the expression is compile.
:returns: a function if the primitive set has 1 or more arguments,
otherwise the result produced by evaluating the tree.
"""
code = str(expr)
if len(pset.arguments) > 0:
# This section is a stripped version of the lambdify
# function of SymPy 0.6.6.
args = ",".join(arg for arg in pset.arguments)
code = "lambda {args}: {code}".format(args=args, code=code)
try:
return eval(code, pset.context, {})
except MemoryError:
_, _, traceback = sys.exc_info()
raise MemoryError, ("DEAP : Error in tree evaluation :"
" Python cannot evaluate a tree higher than 90. "
"To avoid this problem, you should use bloat control on your "
"operators. See the DEAP documentation for more information. "
"DEAP will now abort."), traceback
def compileADF(expr, psets):
"""Compile the expression represented by a list of trees. The first
element of the list is the main tree, and the following elements are
automatically defined functions (ADF) that can be called by the first
tree.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param psets: List of primitive sets. Each set corresponds to an ADF
while the last set is associated with the expression
and should contain reference to the preceding ADFs.
:returns: a function if the main primitive set has 1 or more arguments,
otherwise the result produced by evaluating the tree.
"""
adfdict = {}
func = None
for pset, subexpr in reversed(zip(psets, expr)):
pset.context.update(adfdict)
func = compile(subexpr, pset)
adfdict.update({pset.name: func})
return func
######################################
# GP Program generation functions #
######################################
def genFull(pset, min_, max_, type_=None):
"""Generate an expression where each leaf has a the same depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A full tree with all leaves at the same depth.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height."""
return depth == height
return generate(pset, min_, max_, condition, type_)
def genGrow(pset, min_, max_, type_=None):
"""Generate an expression where each leaf might have a different depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A grown tree with leaves at possibly different depths.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height
or when it is randomly determined that a node should be a terminal.
"""
return depth == height or \
(depth >= min_ and random.random() < pset.terminalRatio)
return generate(pset, min_, max_, condition, type_)
def genHalfAndHalf(pset, min_, max_, type_=None):
"""Generate an expression with a PrimitiveSet *pset*.
Half the time, the expression is generated with :func:`~deap.gp.genGrow`,
the other half, the expression is generated with :func:`~deap.gp.genFull`.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: Either, a full or a grown tree.
"""
method = random.choice((genGrow, genFull))
return method(pset, min_, max_, type_)
def genRamped(pset, min_, max_, type_=None):
"""
.. deprecated:: 1.0
The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
"""
warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
FutureWarning)
return genHalfAndHalf(pset, min_, max_, type_)
def generate(pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of list. The tree is build
from the root to the leaves, and it stop growing when the
condition is fulfilled.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param condition: The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A grown tree with leaves at possibly different depths
depending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
if condition(height, depth):
try:
term = random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a terminal of type '%s', but there is "\
"none available." % (type_,), traceback
if isclass(term):
term = term()
expr.append(term)
else:
try:
prim = random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a primitive of type '%s', but there is "\
"none available." % (type_,), traceback
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
return expr
######################################
# GP Crossovers #
######################################
def cxOnePoint(ind1, ind2):
"""Randomly select in each individual and exchange each subtree with the
point as root between each individual.
:param ind1: First tree participating in the crossover.
:param ind2: Second tree participating in the crossover.
:returns: A tuple of two trees.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
# List all available primitive types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
if ind1.root.ret == __type__:
# Not STGP optimization
types1[__type__] = xrange(1, len(ind1))
types2[__type__] = xrange(1, len(ind2))
common_types = [__type__]
else:
for idx, node in enumerate(ind1[1:], 1):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
type_ = random.choice(list(common_types))
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
def cxOnePointLeafBiased(ind1, ind2, termpb):
"""Randomly select crossover point in each individual and exchange each
subtree with the point as root between each individual.
:param ind1: First typed tree participating in the crossover.
:param ind2: Second typed tree participating in the crossover.
:param termpb: The probability of choosing a terminal node (leaf).
:returns: A tuple of two typed trees.
When the nodes are strongly typed, the operator makes sure the
second node type corresponds to the first node type.
The parameter *termpb* sets the probability to choose between a terminal
or non-terminal crossover point. For instance, as defined by Koza, non-
terminal primitives are selected for 90% of the crossover points, and
terminals for 10%, so *termpb* should be set to 0.1.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
# Determine whether we keep terminals or primitives for each individual
terminal_op = partial(eq, 0)
primitive_op = partial(lt, 0)
arity_op1 = terminal_op if random.random() < termpb else primitive_op
arity_op2 = terminal_op if random.random() < termpb else primitive_op
# List all available primitive or terminal types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
for idx, node in enumerate(ind1[1:], 1):
if arity_op1(node.arity):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
if arity_op2(node.arity):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
# Set does not support indexing
type_ = random.sample(common_types, 1)[0]
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
######################################
# GP Mutations #
######################################
def mutUniform(individual, expr, pset):
"""Randomly select a point in the tree *individual*, then replace the
subtree at that point as a root by the expression generated using method
:func:`expr`.
:param individual: The tree to be mutated.
:param expr: A function object that can generate an expression when
called.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
slice_ = individual.searchSubtree(index)
type_ = individual[index].ret
individual[slice_] = expr(pset=pset, type_=type_)
return individual,
def mutNodeReplacement(individual, pset):
"""Replaces a randomly chosen primitive from *individual* by a randomly
chosen primitive with the same number of arguments from the :attr:`pset`
attribute of the individual.
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
if len(individual) < 2:
return individual,
index = random.randrange(1, len(individual))
node = individual[index]
if node.arity == 0: # Terminal
term = random.choice(pset.terminals[node.ret])
if isclass(term):
term = term()
individual[index] = term
else: # Primitive
prims = [p for p in pset.primitives[node.ret] if p.args == node.args]
individual[index] = random.choice(prims)
return individual,
def mutEphemeral(individual, mode):
"""This operator works on the constants of the tree *individual*. In
*mode* ``"one"``, it will change the value of one of the individual
ephemeral constants by calling its generator function. In *mode*
``"all"``, it will change the value of **all** the ephemeral constants.
:param individual: The normal or typed tree to be mutated.
:param mode: A string to indicate to change ``"one"`` or ``"all"``
ephemeral constants.
:returns: A tuple of one tree.
"""
if mode not in ["one", "all"]:
raise ValueError("Mode must be one of \"one\" or \"all\"")
ephemerals_idx = [index
for index, node in enumerate(individual)
if isinstance(node, Ephemeral)]
if len(ephemerals_idx) > 0:
if mode == "one":
ephemerals_idx = (random.choice(ephemerals_idx),)
for i in ephemerals_idx:
individual[i] = type(individual[i])()
return individual,
def mutInsert(individual, pset):
"""Inserts a new branch at a random position in *individual*. The subtree
at the chosen position is used as child node of the created subtree, in
that way, it is really an insertion rather than a replacement. Note that
the original subtree will become one of the children of the new primitive
inserted, but not perforce the first (its position is randomly selected if
the new primitive has more than one child).
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
node = individual[index]
slice_ = individual.searchSubtree(index)
choice = random.choice
# As we want to keep the current node as children of the new one,
# it must accept the return value of the current node
primitives = [p for p in pset.primitives[node.ret] if node.ret in p.args]
if len(primitives) == 0:
return individual,
new_node = choice(primitives)
new_subtree = [None] * len(new_node.args)
position = choice([i for i, a in enumerate(new_node.args) if a == node.ret])
for i, arg_type in enumerate(new_node.args):
if i != position:
term = choice(pset.terminals[arg_type])
if isclass(term):
term = term()
new_subtree[i] = term
new_subtree[position:position + 1] = individual[slice_]
new_subtree.insert(0, new_node)
individual[slice_] = new_subtree
return individual,
def mutShrink(individual):
"""This operator shrinks the *individual* by chosing randomly a branch and
replacing it with one of the branch's arguments (also randomly chosen).
:param individual: The tree to be shrinked.
:returns: A tuple of one tree.
"""
# We don't want to "shrink" the root
if len(individual) < 3 or individual.height <= 1:
return individual,
iprims = []
for i, node in enumerate(individual[1:], 1):
if isinstance(node, Primitive) and node.ret in node.args:
iprims.append((i, node))
if len(iprims) != 0:
index, prim = random.choice(iprims)
arg_idx = random.choice([i for i, type_ in enumerate(prim.args) if type_ == prim.ret])
rindex = index + 1
for _ in range(arg_idx + 1):
rslice = individual.searchSubtree(rindex)
subtree = individual[rslice]
rindex += len(subtree)
slice_ = individual.searchSubtree(index)
individual[slice_] = subtree
return individual,
######################################
# GP bloat control decorators #
######################################
def staticLimit(key, max_value):
"""Implement a static limit on some measurement on a GP tree, as defined
by Koza in [Koza1989]. It may be used to decorate both crossover and
mutation operators. When an invalid (over the limit) child is generated,
it is simply replaced by one of its parents, randomly selected.
This operator can be used to avoid memory errors occurring when the tree
gets higher than 90 levels (as Python puts a limit on the call stack
depth), because it can ensure that no tree higher than this limit will ever
be accepted in the population, except if it was generated at initialization
time.
:param key: The function to use in order the get the wanted value. For
instance, on a GP tree, ``operator.attrgetter('height')`` may
be used to set a depth limit, and ``len`` to set a size limit.
:param max_value: The maximum value allowed for the given measurement.
:returns: A decorator that can be applied to a GP operator using \
:func:`~deap.base.Toolbox.decorate`
.. note::
If you want to reproduce the exact behavior intended by Koza, set
*key* to ``operator.attrgetter('height')`` and *max_value* to 17.
.. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
Computers by Means of Natural Selection (MIT Press,
Cambridge, MA, 1992)
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if key(ind) > max_value:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator
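# Typical usage (a sketch assuming a configured DEAP toolbox; see the DEAP
# documentation for the full setup):
#
#   import operator
#   toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"), max_value=17))
#   toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"), max_value=17))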
######################################
# GP bloat control algorithms #
######################################
def harm(population, toolbox, cxpb, mutpb, ngen,
alpha, beta, gamma, rho, nbrindsmodel=-1, mincutoff=20,
stats=None, halloffame=None, verbose=__debug__):
"""Implement bloat control on a GP evolution using HARM-GP, as defined in
[Gardner2015]. It is implemented in the form of an evolution algorithm
(similar to :func:`~deap.algorithms.eaSimple`).
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:param ngen: The number of generations.
:param alpha: The HARM *alpha* parameter.
:param beta: The HARM *beta* parameter.
:param gamma: The HARM *gamma* parameter.
:param rho: The HARM *rho* parameter.
:param nbrindsmodel: The number of individuals to generate in order to
model the natural distribution. -1 is a special
value which uses the equation proposed in
[Gardner2015] to set the value of this parameter :
max(2000, len(population))
:param mincutoff: The absolute minimum value for the cutoff point. It is
used to ensure that HARM does not shrink the population
too much at the beginning of the evolution. The default
value is usually fine.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox.
.. note::
The recommended values for the HARM-GP parameters are *alpha=0.05*,
*beta=10*, *gamma=0.25*, *rho=0.9*. However, these parameters can be
adjusted to perform better on a specific problem (see the relevant
paper for tuning information). The number of individuals used to
model the natural distribution and the minimum cutoff point are less
important, their default value being effective in most cases.
.. [Gardner2015] M.-A. Gardner, C. Gagne, and M. Parizeau, Controlling
Code Growth by Dynamically Shaping the Genotype Size Distribution,
Genetic Programming and Evolvable Machines, 2015,
DOI 10.1007/s10710-015-9242-8
"""
def _genpop(n, pickfrom=[], acceptfunc=lambda s: True, producesizes=False):
# Generate a population of n individuals, using individuals in
# *pickfrom* if possible, with a *acceptfunc* acceptance function.
# If *producesizes* is true, also return a list of the produced
# individuals sizes.
# This function is used 1) to generate the natural distribution
# (in this case, pickfrom and acceptfunc should be let at their
# default values) and 2) to generate the final population, in which
# case pickfrom should be the natural population previously generated
# and acceptfunc a function implementing the HARM-GP algorithm.
producedpop = []
producedpopsizes = []
while len(producedpop) < n:
if len(pickfrom) > 0:
# If possible, use the already generated
# individuals (more efficient)
aspirant = pickfrom.pop()
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
else:
opRandom = random.random()
if opRandom < cxpb:
# Crossover
aspirant1, aspirant2 = toolbox.mate(*map(toolbox.clone,
toolbox.select(population, 2)))
del aspirant1.fitness.values, aspirant2.fitness.values
if acceptfunc(len(aspirant1)):
producedpop.append(aspirant1)
if producesizes:
producedpopsizes.append(len(aspirant1))
if len(producedpop) < n and acceptfunc(len(aspirant2)):
producedpop.append(aspirant2)
if producesizes:
producedpopsizes.append(len(aspirant2))
else:
aspirant = toolbox.clone(toolbox.select(population, 1)[0])
if opRandom - cxpb < mutpb:
# Mutation
aspirant = toolbox.mutate(aspirant)[0]
del aspirant.fitness.values
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
if producesizes:
return producedpop, producedpopsizes
else:
return producedpop
halflifefunc = lambda x: (x * float(alpha) + beta)
if nbrindsmodel == -1:
nbrindsmodel = max(2000, len(population))
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print logbook.stream
# Begin the generational process
for gen in range(1, ngen + 1):
# Estimation population natural distribution of sizes
naturalpop, naturalpopsizes = _genpop(nbrindsmodel, producesizes=True)
naturalhist = [0] * (max(naturalpopsizes) + 3)
for indsize in naturalpopsizes:
# Kernel density estimation application
naturalhist[indsize] += 0.4
naturalhist[indsize - 1] += 0.2
naturalhist[indsize + 1] += 0.2
naturalhist[indsize + 2] += 0.1
if indsize - 2 >= 0:
naturalhist[indsize - 2] += 0.1
# Normalization
naturalhist = [val * len(population) / nbrindsmodel for val in naturalhist]
# Cutoff point selection
sortednatural = sorted(naturalpop, key=lambda ind: ind.fitness)
cutoffcandidates = sortednatural[int(len(population) * rho - 1):]
# Select the cutoff point, with an absolute minimum applied
# to avoid weird cases in the first generations
cutoffsize = max(mincutoff, len(min(cutoffcandidates, key=len)))
# Compute the target distribution
targetfunc = lambda x: (gamma * len(population) * math.log(2) /
halflifefunc(x)) * math.exp(-math.log(2) *
(x - cutoffsize) / halflifefunc(x))
targethist = [naturalhist[binidx] if binidx <= cutoffsize else
targetfunc(binidx) for binidx in range(len(naturalhist))]
# Compute the probabilities distribution
probhist = [t / n if n > 0 else t for n, t in zip(naturalhist, targethist)]
probfunc = lambda s: probhist[s] if s < len(probhist) else targetfunc(s)
acceptfunc = lambda s: random.random() <= probfunc(s)
# Generate offspring using the acceptance probabilities
# previously computed
offspring = _genpop(len(population), pickfrom=naturalpop,
acceptfunc=acceptfunc, producesizes=False)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print logbook.stream
return population, logbook
def graph(expr):
"""Construct the graph of a tree expression. The tree expression must be
valid. It returns in order a node list, an edge list, and a dictionary of
the per node labels. The node are represented by numbers, the edges are
tuples connecting two nodes (number), and the labels are values of a
dictionary for which keys are the node numbers.
:param expr: A tree expression to convert into a graph.
:returns: A node list, an edge list, and a dictionary of labels.
The returned objects can be used directly to populate a
`pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_ graph::
import pygraphviz as pgv
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = pgv.AGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
g.layout(prog="dot")
for i in nodes:
n = g.get_node(i)
n.attr["label"] = labels[i]
g.draw("tree.pdf")
or a `NetworkX <http://networkx.github.com/>`_ graph::
import matplotlib.pyplot as plt
import networkx as nx
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
.. note::
We encourage you to use `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ as the nodes might be plotted
out of order when using `NetworkX <http://networkx.github.com/>`_.
"""
nodes = range(len(expr))
edges = list()
labels = dict()
stack = []
for i, node in enumerate(expr):
if stack:
edges.append((stack[-1][0], i))
stack[-1][1] -= 1
labels[i] = node.name if isinstance(node, Primitive) else node.value
stack.append([i, node.arity])
while stack and stack[-1][1] == 0:
stack.pop()
return nodes, edges, labels
if __name__ == "__main__":
import doctest
doctest.testmod()
| lgpl-3.0 |
kdebrab/pandas | scripts/find_undoc_args.py | 5 | 5098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script that compares the signature arguments with the ones in the docstring
and returns the differences in plain text or GitHub task list format.
Usage::
$ ./find_undoc_args.py (see arguments below)
"""
from __future__ import print_function
import sys
from collections import namedtuple
import types
import os
import re
import argparse
import inspect
parser = argparse.ArgumentParser(description='Program description.')
parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False,
default=None, action='store',
help='full path relative to which paths will be reported')
parser.add_argument('-m', '--module', metavar='MODULE', type=str,
required=True, action='store',
help='name of package to import and examine')
parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,
required=False, default=None, action='store',
help='github project where the code lives, '
'e.g. "pandas-dev/pandas"')
args = parser.parse_args()
Entry = namedtuple('Entry',
'func path lnum undoc_names missing_args '
'nsig_names ndoc_names')
def entry_gen(root_ns, module_name):
"""Walk and yield all methods and functions in the module root_ns and
submodules."""
q = [root_ns]
seen = set()
while q:
ns = q.pop()
for x in dir(ns):
cand = getattr(ns, x)
if (isinstance(cand, types.ModuleType) and
cand.__name__ not in seen and
cand.__name__.startswith(module_name)):
seen.add(cand.__name__)
q.insert(0, cand)
elif (isinstance(cand, (types.MethodType, types.FunctionType)) and
cand not in seen and cand.__doc__):
seen.add(cand)
yield cand
def cmp_docstring_sig(f):
"""Return an `Entry` object describing the differences between the
arguments in the signature and the documented ones."""
def build_loc(f):
path = f.__code__.co_filename.split(args.path, 1)[-1][1:]
return dict(path=path, lnum=f.__code__.co_firstlineno)
sig_names = set(inspect.getargspec(f).args)
# XXX numpydoc can be used to get the list of parameters
doc = f.__doc__.lower()
doc = re.split('^\s*parameters\s*', doc, 1, re.M)[-1]
doc = re.split('^\s*returns*', doc, 1, re.M)[0]
doc_names = {x.split(":")[0].strip() for x in doc.split('\n')
if re.match('\s+[\w_]+\s*:', x)}
sig_names.discard('self')
doc_names.discard('kwds')
doc_names.discard('kwargs')
doc_names.discard('args')
return Entry(func=f, path=build_loc(f)['path'], lnum=build_loc(f)['lnum'],
undoc_names=sig_names.difference(doc_names),
missing_args=doc_names.difference(sig_names),
nsig_names=len(sig_names), ndoc_names=len(doc_names))
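# Parsing note (descriptive comment): the comparison above is purely textual -
# the docstring is lower-cased, only the text between the "parameters" and
# "returns" headings is kept, and every line matching "<name> :" is treated as
# a documented argument; 'self' is dropped from the signature names and
# 'kwds'/'kwargs'/'args' from the documented names before the sets are compared.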
def format_id(i):
return i
def format_item_as_github_task_list(i, item, repo):
tmpl = ('- [ ] {id_}) [{fname}:{lnum} ({func_name}())]({link}) - '
'__Missing__[{nmissing}/{total_args}]: {undoc_names}')
link_tmpl = "https://github.com/{repo}/blob/master/{file}#L{lnum}"
link = link_tmpl.format(repo=repo, file=item.path, lnum=item.lnum)
s = tmpl.format(id_=i, fname=item.path, lnum=item.lnum,
func_name=item.func.__name__, link=link,
nmissing=len(item.undoc_names),
total_args=item.nsig_names,
undoc_names=list(item.undoc_names))
if item.missing_args:
s += ' __Extra__(?): %s' % list(item.missing_args)
return s
def format_item_as_plain(i, item):
tmpl = ('+{lnum} {path} {func_name}(): '
'Missing[{nmissing}/{total_args}]={undoc_names}')
s = tmpl.format(path=item.path, lnum=item.lnum,
func_name=item.func.__name__,
nmissing=len(item.undoc_names),
total_args=item.nsig_names,
undoc_names=list(item.undoc_names))
if item.missing_args:
s += ' Extra(?)=%s' % list(item.missing_args)
return s
def main():
module = __import__(args.module)
if not args.path:
args.path = os.path.dirname(module.__file__)
collect = [cmp_docstring_sig(e)
for e in entry_gen(module, module.__name__)]
# only include if there are missing arguments in the docstring
# (fewer false positives) and there are at least some documented arguments
collect = [e for e in collect
if e.undoc_names and len(e.undoc_names) != e.nsig_names]
collect.sort(key=lambda x: x.path)
if args.github_repo:
for i, item in enumerate(collect, 1):
print(format_item_as_github_task_list(i, item, args.github_repo))
else:
for i, item in enumerate(collect, 1):
print(format_item_as_plain(i, item))
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
Mecanon/morphing_wing | dynamic_model/results/flexinol_SMA/config_C/max_deflection/power_usage_2.py | 3 | 11206 | # -*- coding: utf-8 -*-
"""
Analyze the heating, current and power usage of the actuation
Created on Thu Apr 28 09:56:23 2016
@author: Pedro Leal
"""
import math
import numpy as np
import pickle
import matplotlib.pyplot as plt
#Time step
delta_t = 0.005
sigma_o = 100e6
r = 0.000381/2.
d = 2*r
alpha = 0. #set to zero on purpose
c = 837.36
rho = 6450.
#Transformation strain properties
H_max = 0.0550
H_min = 0.0387
sigma_crit = 0
k = 4.6849e-09
rho_E_M = 0.8e-6 #Dynalloy
rho_E_A = 1.0e-6 #Dynalloy
E_A = 3.7427e+10
E_M = 8.8888e+10
C_A = 7.9498e+06
C_M = 7.1986e+06
M_s = 363.5013
M_f = 297.9735
A_s = 324.6427
A_f = 385.0014
n1 = 0.1752
n2 = 0.1789
n3 = 0.1497
n4 = 0.2935
sigma_cal = 200E6
#Load data
Data = pickle.load(open( "data.p", "rb" ))
sigma = Data['sigma']
T = Data['T']
xi = Data['xi']
eps_s = Data['eps_s']
L_s = Data['L_s']
T_o = T[0]
n = len(eps_s)
#==============================================================================
# Calculate output work
#==============================================================================
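# Work bookkeeping (descriptive comment): with wire volume V = pi*r**2*L_s,
# the loop below accumulates the incremental mechanical work
#   dW = V * 0.5*(sigma_i + sigma_{i-1}) * |eps_i - eps_{i-1}|
# and the mechanical power
#   dW/dt = V * (eps*dsigma/dt + sigma*deps/dt),
# whose time integral (Total_delta) is later used for the efficiency estimates.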
W_list = []
deltaW_list = []
Total_work = 0
total_work_list = []
for i in range(1, n):
delta_eps = abs(eps_s[i] - eps_s[i-1])
delta_sigma = abs(sigma[i] - sigma[i-1])
# avg_eps = abs(eps_s[i] + eps_s[i-1])/2.
# avg_sigma = abs(sigma[i] + sigma[i-1])/2.
av_eps = (eps_s[i] + eps_s[i-1])/2.
av_sigma = (sigma[i] + sigma[i-1])/2.
dW = math.pi*r**2*L_s[0]*0.5*(sigma[i]+sigma[i-1])*delta_eps
deltaW = math.pi*r**2*L_s[0]*(eps_s[i]*delta_sigma/delta_t + sigma[i]*delta_eps/delta_t)
W_list.append(dW)
deltaW_list.append(deltaW)
Total_work += deltaW
total_work_list.append(Total_work)
Total_work = sum(W_list)*delta_t
Total_delta = sum(deltaW_list)*delta_t
#print Total_delta
plt.figure()
plt.plot(eps_s)
plt.figure()
plt.plot(sigma)
#==============================================================================
# Calculate input heat for different h
#==============================================================================
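# Power balance (descriptive comment): the electrical power demanded from the
# SMA wire is estimated from a lumped heat balance,
#   P = pi*r**2*L * [ (T*alpha*dsigma + rho*c*dT
#                      + dxi*(-pi_t + rho_delta_s0*T)) / dt
#                     + (2*h/r)*(T - T_o) ],
# i.e. thermoelastic coupling + sensible heating + transformation (latent)
# heat plus lateral convection to the surroundings; the corresponding current
# I follows from Joule heating through the phase-dependent resistivity rho_E.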
h_list = np.linspace(0,100., 6)
P_h_list = []
total_power_list = []
for j in range(len(h_list)):
h = h_list[j]
P_list = []
I_list = []
a = 0
b = 0
for i in range(1, n):
delta_sigma = sigma[i] - sigma[i-1]
delta_T = T[i] - T[i-1]
delta_xi = xi[i] - xi[i-1]
rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
if abs(sigma[i]) <= sigma_crit:
dH_cur = 0
else:
dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
a1 = rho_delta_s0*(M_f - M_s)
a2 = rho_delta_s0*(A_s - A_f)
a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
pi_t = Y_0_t + D*abs(sigma[i])*H_cur
#constant h
I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
2.*h*(T[i] - T_o))))
P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
2.*(h/r)*(T[i] - T_o))
print P, T[i]*alpha*delta_sigma, rho*c*delta_T, delta_xi*(-pi_t + rho_delta_s0*T[i])
a += rho*c*delta_T
b += delta_xi*(-pi_t + rho_delta_s0*T[i])
# print a,b
I_list.append(I)
P_list.append(P)
P_h_list.append(P_list)
Total_power = 0
for i in range(len(P_list)-1):
Total_power += delta_t*P_list[i]
total_power_list.append(Total_power)
t = np.linspace(0,(n-2)*delta_t, n-1)
#plt.figure()
#plt.plot(t, I_list, 'b')
#plt.scatter(t, I_list, c = 'b')
#plt.xlabel('Time (s)')
#plt.ylabel('Current (A)')
#plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
# min(I_list) - 0.02*(max(I_list)-min(I_list)),
# max(I_list) + 0.02*(max(I_list)-min(I_list))])
#plt.grid()
plt.figure()
for i in range(len(h_list)):
color=((1.-float(i)/(len(h_list)-1), float(i)/(len(h_list)-1),0, 1.))
plt.plot(t, P_h_list[i], label = 'h = ' + str(h_list[i]), color = color)
plt.plot(t, deltaW_list, 'b', label = '$\dot{W}$')
#plt.plot(t, W_list, 'b', label = '$\dot{W}$')
#plt.scatter(t, P_list, c = 'b')
plt.xlabel('Time (s)')
plt.ylabel('Power (W)')
#plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
# min(P_list) - 0.02*(max(P_list)-min(P_list)),
# max(P_list) + 0.02*(max(P_list)-min(P_list))])
plt.grid()
plt.legend(loc= 'upper left')
plt.figure()
plt.plot(h_list, total_power_list)
plt.xlabel('Convection coefficient')
plt.ylabel('Total power consumption (J)')
plt.grid()
plt.figure()
plt.plot(h_list, 100.*Total_delta/np.array(total_power_list))
plt.xlabel('Convection coefficient $h$ ')
plt.ylabel('Efficiency (%)')
plt.grid()
print 'Total adiabatic power is %f Joules' % total_power_list[0]
print 'Total work is %f Joules' % Total_delta
print 'Adiabatic efficiency is %f ' % (Total_delta/total_power_list[0])
#==============================================================================
# Calculate input heat for different delta_t
#==============================================================================
delta_t_list = np.linspace(0.001,0.05, 50)
#h = 10.
h_dt_power_list = []
for i in range(len(h_list)):
h = h_list[i]
total_power_list = []
for j in range(len(delta_t_list)):
delta_t = delta_t_list[j]
P_list = []
I_list = []
a = 0
b = 0
for i in range(1, n):
delta_sigma = sigma[i] - sigma[i-1]
delta_T = T[i] - T[i-1]
delta_xi = xi[i] - xi[i-1]
rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
if abs(sigma[i]) <= sigma_crit:
dH_cur = 0
else:
dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
a1 = rho_delta_s0*(M_f - M_s)
a2 = rho_delta_s0*(A_s - A_f)
a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
pi_t = Y_0_t + D*abs(sigma[i])*H_cur
#constant h
I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
2.*h*(T[i] - T_o))))
P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
2.*(h/r)*(T[i] - T_o))
a += rho*c*delta_T
b += delta_xi*(-pi_t + rho_delta_s0*T[i])
# print a,b
I_list.append(I)
P_list.append(P)
Total_power = 0
for i in range(len(P_list)-1):
Total_power += delta_t*P_list[i]
total_power_list.append(Total_power)
h_dt_power_list.append(total_power_list)
t = np.linspace(0,(n-2)*delta_t, n-1)
plt.figure()
for i in range(len(h_list)):
# print len(h_dt_power_list)
color=((1.-float(i)/(len(h_list)-1), float(i)/(len(h_list)-1),0, 1.))
plt.plot(delta_t_list, 100.*Total_delta/np.array(h_dt_power_list[i]),
color = color, label= 'h = %.f' % h_list[i])
plt.xlabel('$\Delta t$ ')
plt.ylabel('Efficiency (%)')
plt.grid()
plt.legend(loc='best')
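# The per-step input power above is written out three times in this script.
# The helper below is an equivalent sketch (it relies on the globals r, L_s,
# T, alpha, rho and c defined earlier); the original loops keep the inline
# expression and do not call it.
def input_power(i, h, delta_sigma, delta_T, delta_xi, pi_t, rho_delta_s0,
                delta_t, T_o):
    """Input power (W) at step i: thermal/transformation terms over delta_t
    plus the convective loss 2*(h/r)*(T - T_o)."""
    return math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma +
                                 rho*c*delta_T +
                                 delta_xi*(-pi_t + rho_delta_s0*T[i]))/delta_t +
                                2.*(h/r)*(T[i] - T_o))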
#==============================================================================
# Calculate heat input for different T_o
#==============================================================================
delta_t = 0.05
h = 10. # arbitrary convection coefficient (h = 0 would be the adiabatic case)
T_list = np.linspace(200,300., 5)
P_T_list = []
total_power_list = []
for j in range(len(T_list)):
T_o = T_list[j]
P_list = []
I_list = []
for i in range(1, n):
delta_sigma = sigma[i] - sigma[i-1]
delta_T = T[i] - T[i-1]
delta_xi = xi[i] - xi[i-1]
rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
if abs(sigma[i]) <= sigma_crit:
dH_cur = 0
else:
dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
a1 = rho_delta_s0*(M_f - M_s)
a2 = rho_delta_s0*(A_s - A_f)
a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
pi_t = Y_0_t + D*abs(sigma[i])*H_cur
#constant h
# I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
# rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
# 2.*h*(T[i] - T_o))))
#
P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
2.*(h/r)*(T[i] - T_o))
        #I_list.append(I)  # the current calculation above is commented out, so this append is skipped too
P_list.append(P)
P_T_list.append(P_list)
Total_power = 0
for i in range(len(P_list)-1):
Total_power += delta_t*(P_list[i] + P_list[i+1])/2.
total_power_list.append(Total_power)
plt.figure()
for i in range(len(T_list)):
color = ((1.-float(i)/(len(T_list)-1), float(i)/(len(T_list)-1),0, 1.))
    plt.plot(t, P_T_list[i], label = '$T_o$ = ' + str(T_list[i]), color = color)
#plt.scatter(t, P_list, c = 'b')
plt.xlabel('Time (s)')
plt.ylabel('Power (W)')
#plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
# min(P_list) - 0.02*(max(P_list)-min(P_list)),
# max(P_list) + 0.02*(max(P_list)-min(P_list))])
plt.grid()
plt.legend(loc= 'upper left')
plt.figure()
plt.plot(T_list, total_power_list)
plt.xlabel('Temperature (K)')
plt.ylabel('Total power consumption (J)')
plt.grid() | mit |
h2educ/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic exponential (absolute_exponential)
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (set n_jobs=-1 to use all available CPUs)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
bmmalone/pymisc-utils | pyllars/sklearn_transformers/nan_standard_scaler.py | 1 | 4256 | import logging
logger = logging.getLogger(__name__)
import numpy as np
import pandas as pd
import sklearn.base
import pyllars.validation_utils as validation_utils
class NaNStandardScaler(sklearn.base.TransformerMixin):
""" Scale the specified columns to zero mean and unit variance while
ignoring np.nan's.
This is expected to make more of a difference when there are many np.nan
values which may incorrectly suggest a smaller empirical variance if just
replaced by the mean.
Additionally, this class maintains the presence of the np.nan's, so
downstream tasks can handle them as appropriate.
"""
def __init__(self, columns=None):
self.col_mean_ = None
self.col_std_ = None
self.columns = columns
def fit(self, X, *_):
if self.columns is None:
if isinstance(X, pd.DataFrame):
self.columns_ = X.columns
elif isinstance(X, np.ndarray):
if len(X.shape) == 1:
X = X.reshape(-1, 1)
self.columns_ = np.arange(X.shape[1])
else:
self.columns_ = self.columns
# now, actually grab the columns depending on the type of X
if isinstance(X, pd.DataFrame):
X_cols = X[self.columns_]
elif isinstance(X, np.ndarray):
X_cols = X[:,self.columns_]
else:
msg = ("[NanStandardScaler.fit]: unrecognized data type: {}".
format(type(X)))
raise ValueError(msg)
###
# make sure we have numpy floats. we might not in cases where
# the original data matrix contains mixed types (categorical and
# numeric types)
#
# See this SO comment for more details:
# https://stackoverflow.com/questions/18557337/
###
X_cols = X_cols.astype(float)
self.col_mean_ = np.nanmean(X_cols, axis=0)
self.col_std_ = np.nanstd(X_cols, axis=0)
# we will not do anything with observations we see less than twice
m_zero = self.col_std_ == 0
m_nan = np.isnan(self.col_std_)
self.col_ignore_ = m_zero | m_nan
# do this to avoid divide by zero warnings later
self.col_mean_ = np.nan_to_num(self.col_mean_)
self.col_std_[self.col_ignore_] = 1
return self
def transform(self, X, *_):
to_check = [
'col_mean_',
'col_std_',
'columns_',
'col_ignore_',
]
validation_utils.check_is_fitted(self, to_check)
# if we did not see a column in the training, or if it had only one
# value, we cannot really do anything with it
# so ignore those
# do not overwrite our original information
X = X.copy()
# now, actually grab the columns depending on the type of X
if isinstance(X, pd.DataFrame):
X_cols = X[self.columns_].copy()
X_cols.iloc[:, self.col_ignore_] = 0
elif isinstance(X, np.ndarray):
# check if we have a single vector
if len(X.shape) == 1:
#X[self.col_ignore_] = 0
X = X.reshape(-1, 1)
X_cols = X[:,self.columns_]
X_cols[:,self.col_ignore_] = 0
else:
msg = ("[NanStandardScaler.transform]: unrecognized data type: {}".
format(type(X)))
raise ValueError(msg)
X_transform = ((X_cols - self.col_mean_) / self.col_std_)
# and stick the columns back
if isinstance(X, pd.DataFrame):
X[self.columns_] = X_transform
else:
X[:,self.columns_] = X_transform
return X
@classmethod
def get_scaler(cls, means, stds):
scaler = cls()
num_features = means.shape[0]
scaler.col_mean_ = means
scaler.col_std_ = stds
scaler.columns_ = np.arange(num_features)
m_zero = scaler.col_std_ == 0
m_nan = np.isnan(scaler.col_std_)
scaler.col_ignore_ = m_zero | m_nan
scaler.col_mean_ = np.nan_to_num(scaler.col_mean_)
scaler.col_std_[scaler.col_ignore_] = 1
return scaler
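# Minimal usage sketch (assumes this package is installed): NaN entries are
# preserved while the remaining values are scaled per column with the
# NaN-ignoring mean/std computed in ``fit``.
if __name__ == '__main__':
    example = pd.DataFrame({'a': [1.0, 2.0, np.nan, 4.0],
                            'b': [10.0, np.nan, 30.0, 50.0]})
    scaler = NaNStandardScaler()
    scaler.fit(example)
    print(scaler.transform(example))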
| mit |
gaoxianglong/oceanbase | oceanbase_0.4/tools/deploy/perf/1.py | 12 | 1857 | import datetime
import re
import sys
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
time_format = "%Y-%m-%d %H:%M:%S"
d = dict()
start_time = None
sql_count = 0
sql_time = 0
sql_time_dist = dict()
rpc_time = 0
urpc_time = 0
wait_time = 0
qps2time = dict()
rpc_times = []
urpc_times = []
wait_times = []
for l in sys.stdin:
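    # each matching line yields: trace_id, handle_sql_time, wait_sql_queue_time,
    # the rpc latency and print_time (microseconds), captured as groups 1-5 below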
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] latency=\[(\d+)\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(5))
if start_time is None:
start_time = end_time
trace_id = m.group(1)
ts = m.group(5)[:-6]
d[trace_id] = dict(
sql_time = int(m.group(2)),
wait_time = int(m.group(3)),
rpc_time = int(m.group(4)),
)
sql_count += 1
sql_time += d[trace_id]['sql_time']
if sql_time_dist.has_key(d[trace_id]['sql_time']):
sql_time_dist[d[trace_id]['sql_time']] += 1
else:
sql_time_dist[d[trace_id]['sql_time']] = 0
wait_time += d[trace_id]['wait_time']
wait_times.append(d[trace_id]['wait_time'])
rpc_time += d[trace_id]['rpc_time']
rpc_times.append(d[trace_id]['rpc_time'])
if qps2time.has_key(ts):
qps2time[ts] += 1
else:
qps2time[ts] = 0
elapsed_seconds = (end_time - start_time) / 10**6
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / sql_count
avg_rpc_time = float(rpc_time) / sql_count
avg_urpc_time = float(urpc_time) / sql_count
avg_wait_time = float(wait_time) / sql_count
print "QPS: %d" % (qps)
print "AVG TIME: %f" % (avg_sql_time)
print "AVG RPC TIME: %f" % (avg_rpc_time)
print "AVG WAIT TIME: %f" % (avg_wait_time)
| gpl-2.0 |
flightgong/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
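# Quick numerical check: points returned by l1/l2 lie on the unit L1/L2 balls,
# i.e. |x| + |y| = 1 and x**2 + y**2 = 1.
_xs_check = np.linspace(0, 0.99, 25)
assert np.allclose(_xs_check + l1(_xs_check), 1.0)
assert np.allclose(_xs_check ** 2 + l2(_xs_check) ** 2, 1.0)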
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would divide by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
twneale/pypscl | pscl/rollcall.py | 1 | 4640 | import numpy as np
from pandas.rpy import common as rpy_common
from rpy2.robjects.packages import importr
from .base import Field, Translator, Wrapper
from .accessors import ValueAccessor, VectorAccessor
from .ordfile import OrdFile
from .wnominate import wnominate
from .ideal import ideal
pscl = importr('pscl')
class NumberOfLegislators(VectorAccessor):
'''Number of legislators in the rollcall object, after processing the
dropList.
'''
key = 'n'
class NumberOfRollcalls(VectorAccessor):
'''Number of roll call votes in the rollcall object, after
processing the dropList.
'''
key = 'm'
class AllVotes(VectorAccessor):
'''A matrix containing a tabular breakdown of all votes in the
rollcall matrix (object$votes), after processing the
dropList.
'''
key = 'allVotes'
class Votes(VectorAccessor):
'''
'''
class Source(VectorAccessor):
'''A fake, hard-coded C: filesystem location of the ord file. Useless.
'''
class RollcallSummary(Wrapper):
all_votes = AllVotes()
n = NumberOfLegislators()
m = NumberOfRollcalls()
eq_attrs = ('m', 'n', 'codes', 'all_votes')
@property
def codes(self):
'''Return a mapping of vote values like "yea" or "nay" to
the "codes" (in the docs, integers) that represent them
in the underlying Rollcall object.
'''
items = []
for yes_no_other, value_list in self['codes'].iteritems():
items.append((yes_no_other, tuple(value_list)))
return dict(items)
class Rollcall(Wrapper):
'''A wrapper for the pscl rollcall object.
'''
# Wrapped R functions ---------------------------------------------------
def drop_unanimous(self, lop=0):
self.obj = pscl.dropUnanimous(self.obj, lop=0)
return self
def summary(self):
return RollcallSummary(pscl.summary_rollcall(self.obj))
# Accessors ---------------------------------------------------------------
n = NumberOfLegislators()
m = NumberOfRollcalls()
votes = Votes()
source = Source()
eq_attrs = ('m', 'n', 'codes', 'all_votes')
@property
def codes(self):
'''Return a mapping of vote values like "yea" or "nay" to
the "codes" (in the docs, integers) that represent them
in the underlying Rollcall object.
'''
items = []
for yes_no_other, value_list in self['codes'].iteritems():
items.append((yes_no_other, tuple(value_list)))
return dict(items)
# Alternative constructors ------------------------------------------------
@classmethod
def from_matrix(cls, r_matrix, **kwargs):
'''Instantiate a Rollcall object from an R matrix, of the kind
described in the pscl docs.
See http://cran.r-project.org/web/packages/pscl/pscl.pdf
'''
return _RollcallTranslator(**kwargs).r_object(r_matrix)
@classmethod
def from_dataframe(cls, dataframe, **kwargs):
'''Instantiate a Rollcall object from a pandas.DataFrame corresponding
to the R matrix described in the pscl docs.
See http://cran.r-project.org/web/packages/pscl/pscl.pdf
'''
r_matrix = rpy_common.convert_to_r_matrix(dataframe)
return cls.from_matrix(r_matrix, **kwargs)
@classmethod
def from_ordfile(cls, fp, **kwargs):
        '''Instantiate a Rollcall object from an ordfile.
'''
dataframe = OrdFile(fp).as_dataframe()
rollcall = cls.from_dataframe(dataframe,
yea=[1.0, 2.0, 3.0],
nay=[4.0, 5.0, 6.0],
missing=[7.0, 8.0, 9.0],
not_in_legis=0.0,
legis_names=tuple(dataframe.index), **kwargs)
return rollcall
# Analysis methods -------------------------------------------------------
def ideal(self, *args, **kwargs):
'''
'''
return ideal(self, *args, **kwargs)
def wnominate(self, polarity, *args, **kwargs):
'''
'''
return wnominate(self, polarity, *args, **kwargs)
class _RollcallTranslator(Translator):
    '''A python wrapper around the R pscl package's rollcall object.
'''
r_type = pscl.rollcall
wrapper = Rollcall
yea = Field(name='yea', default=1)
nay = Field(name='nay', default=2)
not_in_legis = Field(name='notInLegis', default=9)
field_names = (
('missing', 'missing'),
('legis_names', 'legis.names'),
('vote_names', 'vote.names'),
('legis_data', 'legis.data'),
('vote_data', 'vote.data'),
('desc', 'desc'),
('source', 'source'))
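# Minimal usage sketch (hypothetical data; needs a working R installation with
# the pscl package reachable through rpy2):
def _example_rollcall():
    import pandas as pd
    votes = pd.DataFrame([[1, 1, 2], [2, 1, 1], [1, 2, 9]],
                         index=['leg_a', 'leg_b', 'leg_c'])
    return Rollcall.from_dataframe(votes, legis_names=tuple(votes.index))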
| bsd-3-clause |
ChinaQuants/blaze | blaze/compute/csv.py | 11 | 2667 | from __future__ import absolute_import, division, print_function
import pandas
import os
from toolz import curry, concat
import pandas as pd
import numpy as np
from collections import Iterator, Iterable
from odo import into
from odo.chunks import chunks
from odo.backends.csv import CSV
from multipledispatch import MDNotImplementedError
from ..dispatch import dispatch
from ..expr import Expr, Head, ElemWise, Distinct, Symbol, Projection, Field
from ..expr.core import path
from ..utils import available_memory
from ..expr.split import split
from .core import compute
from ..expr.optimize import lean_projection
from .pmap import get_default_pmap
__all__ = ['optimize', 'pre_compute', 'compute_chunk', 'compute_down']
@dispatch(Expr, CSV)
def optimize(expr, _):
return lean_projection(expr) # This is handled in pre_compute
@dispatch(Expr, CSV)
def pre_compute(expr, data, comfortable_memory=None, chunksize=2**18, **kwargs):
comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)
kwargs = dict()
# Chunk if the file is large
if os.path.getsize(data.path) > comfortable_memory:
kwargs['chunksize'] = chunksize
else:
chunksize = None
# Insert projection into read_csv
oexpr = optimize(expr, data)
leaf = oexpr._leaves()[0]
pth = list(path(oexpr, leaf))
if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
kwargs['usecols'] = pth[-2].fields
if chunksize:
return into(chunks(pd.DataFrame), data, dshape=leaf.dshape, **kwargs)
else:
return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
Cheap = (Head, ElemWise, Distinct, Symbol)
@dispatch(Head, CSV)
def pre_compute(expr, data, **kwargs):
leaf = expr._leaves()[0]
if all(isinstance(e, Cheap) for e in path(expr, leaf)):
return into(Iterator, data, chunksize=10000, dshape=leaf.dshape)
else:
raise MDNotImplementedError()
def compute_chunk(chunk, chunk_expr, part):
return compute(chunk_expr, {chunk: part})
@dispatch(Expr, pandas.io.parsers.TextFileReader)
def compute_down(expr, data, map=None, **kwargs):
if map is None:
map = get_default_pmap()
leaf = expr._leaves()[0]
(chunk, chunk_expr), (agg, agg_expr) = split(leaf, expr)
parts = list(map(curry(compute_chunk, chunk, chunk_expr), data))
if isinstance(parts[0], np.ndarray):
intermediate = np.concatenate(parts)
elif isinstance(parts[0], pd.DataFrame):
intermediate = pd.concat(parts)
elif isinstance(parts[0], (Iterable, Iterator)):
intermediate = concat(parts)
return compute(agg_expr, {agg: intermediate})
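# Illustrative sketch (not used anywhere in blaze): the same split/aggregate
# idea with plain pandas objects -- a partial (sum, count) per chunk, combined
# into the final mean, mirroring what compute_down does above.
def _chunked_mean_sketch():
    df = pd.DataFrame({'x': np.arange(10.0)})
    # stand-in for the chunks yielded by a pandas TextFileReader
    chunk_iter = (df.iloc[i:i + 4] for i in range(0, len(df), 4))
    partials = [(chunk['x'].sum(), len(chunk)) for chunk in chunk_iter]
    total, count = [sum(vals) for vals in zip(*partials)]
    return total / count  # equals df['x'].mean()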
| bsd-3-clause |
louispotok/pandas | pandas/tests/plotting/test_groupby.py | 9 | 2451 | # coding: utf-8
""" Test cases for GroupBy.plot """
from pandas import Series, DataFrame
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from pandas.tests.plotting.common import TestPlotBase
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'val': np.random.randn(9)},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"],
"ByCol": [1, 2],
"Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
df.groupby('z').plot.scatter('x', 'y')
tm.close()
df.groupby('z')['x'].plot.line()
tm.close()
def test_plot_kwargs(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
res = df.groupby('z').plot(kind='scatter', x='x', y='y')
# check that a scatter plot is effectively plotted: the axes should
# contain a PathCollection from the scatter plot (GH11805)
assert len(res['a'].collections) == 1
res = df.groupby('z').plot.scatter(x='x', y='y')
assert len(res['a'].collections) == 1
| bsd-3-clause |
DimensionalScoop/kautschuk | AP_SS16/504/python/helpers.py | 2 | 2188 | import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import uncertainties
from scipy.optimize import curve_fit
from uncertainties import ufloat
from uncertainties.unumpy import uarray
maxfev = 1000000
def autofit(x, y, fitFunction, p0_=None):
"""Returns params of the curvefit as ufloat."""
if isinstance(y[0], uncertainties.UFloat):
ny = [i.nominal_value for i in y]
dy = [i.std_dev for i in y]
params, covariance = curve_fit(fitFunction, x, ny, sigma=dy, absolute_sigma=True,
p0=p0_, maxfev=maxfev)
else:
params, covariance = curve_fit(fitFunction, x, y, p0=p0_, maxfev=maxfev)
errors = np.sqrt(np.diag(covariance))
return uarray(params, errors)
def combine_measurements(values):
"""Combines a np.array of measurements into one ufloat"""
return ufloat(mean(values), stdDevOfMean(values))
def mean(values):
"""Return the mean of values"""
values = np.array(values)
return sum(values) / len(values)
def stdDev(values):
"""Return estimated standard deviation"""
values = np.array(values)
b = 0
m = mean(values)
for x in values:
b += (x - m) ** 2
    return np.sqrt(1.0 / (len(values) - 1) * b)
def stdDevOfMean(values):
"""Return estimated standard deviation of the mean (the important one!)"""
return stdDev(values) / np.sqrt(len(values))
def cutErrors(values):
"""Converts an array of ufloat to an array of floats, discarding errors"""
return np.array([v.nominal_value for v in values])
def estimate_sigmas(values, ableseunsicherheit):
"""Generates std deviations for analoge instruments. Returns a ufloatarray."""
nominal = values
magnitude = np.floor(np.log10(nominal))
error = [ableseunsicherheit * 10**mag for mag in magnitude]
return uarray(nominal, error)
def estimate_sigmas_only(values, ableseunsicherheit):
"""Generates std deviations for analoge instruments. Returns only an array with the errors."""
nominal = values
magnitude = np.floor(np.log10(nominal))
error = [ableseunsicherheit * 10**mag for mag in magnitude]
return error
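# Minimal usage sketch: combine repeated readings of one quantity into a
# single ufloat (mean +/- standard deviation of the mean).
if __name__ == '__main__':
    readings = [9.81, 9.79, 9.83, 9.80]
    print(combine_measurements(readings))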
| mit |
manashmndl/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
neuroidss/nupic | examples/opf/clients/cpu/cpu.py | 10 | 3122 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple client to read CPU usage and predict it in real time."""
from collections import deque
import time
import psutil
import matplotlib.pyplot as plt
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.model_factory import ModelFactory
import model_params
SECONDS_PER_STEP = 2
WINDOW = 60
# turn matplotlib interactive mode on (ion)
plt.ion()
fig = plt.figure()
# plot title, legend, etc
plt.title('CPU prediction example')
plt.xlabel('time [s]')
plt.ylabel('CPU usage [%]')
def runCPU():
"""Poll CPU usage, make predictions, and plot the results. Runs forever."""
# Create the model for predicting CPU usage.
model = ModelFactory.create(model_params.MODEL_PARAMS)
model.enableInference({'predictedField': 'cpu'})
# The shifter will align prediction and actual values.
shifter = InferenceShifter()
# Keep the last WINDOW predicted and actual values for plotting.
  actHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
  predHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
# Initialize the plot lines that we will update with each new record.
actline, = plt.plot(range(WINDOW), actHistory)
predline, = plt.plot(range(WINDOW), predHistory)
# Set the y-axis range.
actline.axes.set_ylim(0, 100)
predline.axes.set_ylim(0, 100)
while True:
s = time.time()
# Get the CPU usage.
cpu = psutil.cpu_percent()
# Run the input through the model and shift the resulting prediction.
modelInput = {'cpu': cpu}
result = shifter.shift(model.run(modelInput))
# Update the trailing predicted and actual value deques.
inference = result.inferences['multiStepBestPredictions'][5]
if inference is not None:
actHistory.append(result.rawInput['cpu'])
predHistory.append(inference)
# Redraw the chart with the new data.
actline.set_ydata(actHistory) # update the data
predline.set_ydata(predHistory) # update the data
plt.draw()
plt.legend( ('actual','predicted') )
    # Pause between iterations (plt.pause may raise if the plot window has
    # been closed).
    try:
      plt.pause(SECONDS_PER_STEP)
    except Exception:
      pass
if __name__ == "__main__":
runCPU()
| agpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/animation/old_animation/strip_chart_demo.py | 6 | 2137 | """
Emulate an oscilloscope. Requires the animation API introduced in
matplotlib 0.84. See
http://www.scipy.org/wikis/topical_software/Animations for an
explanation.
This example uses gtk but does not depend on it intimately. It just
uses the idle handler to trigger events. You can plug this into a
different GUI that supports animation (GTKAgg, TkAgg, WXAgg) and use
your toolkit's idle/timer functions.
"""
import gobject
import matplotlib
matplotlib.use('GTKAgg')
import numpy as np
from matplotlib.lines import Line2D
class Scope:
def __init__(self, ax, maxt=10, dt=0.01):
self.ax = ax
self.canvas = ax.figure.canvas
self.dt = dt
self.maxt = maxt
self.tdata = [0]
self.ydata = [0]
self.line = Line2D(self.tdata, self.ydata, animated=True)
self.ax.add_line(self.line)
self.background = None
self.canvas.mpl_connect('draw_event', self.update_background)
self.ax.set_ylim(-.1, 1.1)
self.ax.set_xlim(0, self.maxt)
def update_background(self, event):
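        # cache a clean copy of the axes background so update() can restore it
        # and blit only the updated line (standard blitting pattern)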
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def emitter(self, p=0.01):
'return a random value with probability p, else 0'
v = np.random.rand(1)
if v>p: return 0.
else: return np.random.rand(1)
def update(self, *args):
if self.background is None: return True
y = self.emitter()
lastt = self.tdata[-1]
if lastt>self.tdata[0]+self.maxt: # reset the arrays
self.tdata = [self.tdata[-1]]
self.ydata = [self.ydata[-1]]
self.ax.set_xlim(self.tdata[0], self.tdata[0]+self.maxt)
self.ax.figure.canvas.draw()
self.canvas.restore_region(self.background)
t = self.tdata[-1] + self.dt
self.tdata.append(t)
self.ydata.append(y)
self.line.set_data(self.tdata, self.ydata)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
return True
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
scope = Scope(ax)
gobject.idle_add(scope.update)
show()
| mit |
hlin117/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 9 | 51137 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function_, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = int(n_jobs)
@property
@deprecated("Attribute loss_function was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'loss_function_' instead")
def loss_function(self):
return self.loss_function_
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
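            # expose the averaged weights only once at least ``average``
            # samples have been seen; until then keep the plain SGD solution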
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
loss_function_ : concrete ``LossFunction``
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
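            # map decision scores into [0, 1] probabilities via
            # (clip(score, -1, 1) + 1) / 2 (see the predict_proba docstring)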
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
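# Illustrative sketch (not part of the original module): demonstrates the
# modified_huber probability formula documented in ``predict_proba`` above,
# i.e. (clip(decision_function(X), -1, 1) + 1) / 2 in the binary case. The
# helper name and the toy data are assumptions added for illustration; the
# function is never called by the library code.
def _example_modified_huber_proba():
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([0, 0, 1, 1])
    clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    scores = clf.decision_function(X)
    # Manually apply the documented formula for the positive class ...
    manual = (np.clip(scores, -1, 1) + 1) / 2.
    # ... and compare with the second column returned by predict_proba.
    return manual, clf.predict_proba(X)[:, 1]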
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
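            # expose the averaged weights only once at least ``average``
            # samples have been seen (t_ counts samples, starting at 1);
            # until then keep the plain SGD solution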
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
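# Illustrative sketch (not part of the original module): the 'invscaling'
# learning-rate schedule described in the SGDRegressor docstring above,
# eta = eta0 / pow(t, power_t), written out explicitly. The helper name is an
# assumption added for illustration; the defaults mirror the estimator
# defaults and the function is never called by the library code.
def _example_invscaling_eta(t, eta0=0.01, power_t=0.25):
    # With the defaults, the step size starts at eta0 for t=1 and decays
    # slowly towards zero as more samples are seen.
    return eta0 / (t ** power_t)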
| bsd-3-clause |
nmartensen/pandas | scripts/gen_release_notes.py | 6 | 2434 | from __future__ import print_function
import sys
import json
from pandas.io.common import urlopen
from datetime import datetime
class Milestone(object):
def __init__(self, title, number):
self.title = title
self.number = number
def __eq__(self, other):
if isinstance(other, Milestone):
return self.number == other.number
return False
class Issue(object):
def __init__(self, title, labels, number, milestone, body, state):
self.title = title
self.labels = set([x['name'] for x in labels])
self.number = number
self.milestone = milestone
self.body = body
self.closed = state == 'closed'
def __eq__(self, other):
if isinstance(other, Issue):
return self.number == other.number
return False
def get_issues():
all_issues = []
page_number = 1
while True:
iss = _get_page(page_number)
if len(iss) == 0:
break
page_number += 1
all_issues.extend(iss)
return all_issues
def _get_page(page_number):
gh_url = ('https://api.github.com/repos/pandas-dev/pandas/issues?'
'milestone=*&state=closed&assignee=*&page=%d') % page_number
with urlopen(gh_url) as resp:
rs = resp.readlines()[0]
jsondata = json.loads(rs)
issues = [Issue(x['title'], x['labels'], x['number'],
get_milestone(x['milestone']), x['body'], x['state'])
for x in jsondata]
return issues
def get_milestone(data):
if data is None:
return None
return Milestone(data['title'], data['number'])
def collate_label(issues, label):
lines = []
for x in issues:
if label in x.labels:
lines.append('\t- %s(#%d)' % (x.title, x.number))
return '\n'.join(lines)
def release_notes(milestone):
issues = get_issues()
headers = ['New Features', 'Improvements to existing features',
'API Changes', 'Bug fixes']
labels = ['New', 'Enhancement', 'API-Change', 'Bug']
rs = 'pandas %s' % milestone
rs += '\n' + ('=' * len(rs))
rs += '\n\n **Release date:** %s' % datetime.today().strftime('%B %d, %Y')
for i, h in enumerate(headers):
rs += '\n\n**%s**\n\n' % h
l = labels[i]
rs += collate_label(issues, l)
return rs
if __name__ == '__main__':
rs = release_notes(sys.argv[1])
print(rs)
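# Example invocation (the milestone title below is hypothetical; the script
# needs network access to the GitHub API):
#
#     python gen_release_notes.py 0.12.0 > release_notes.rst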
| bsd-3-clause |
zorroblue/scikit-learn | examples/cluster/plot_segmentation_toy.py | 33 | 3442 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
# #############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: the weaker the dependence on
# the gradient, the closer the segmentation is to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
# #############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
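# A possible variation (not part of the original example): make the dependence
# on the image gradient explicit through a ``beta`` parameter. A larger beta
# makes the edge weights decay faster with the gradient, so the segmentation
# follows intensity boundaries more closely, while a small beta approaches the
# Voronoi-like behaviour discussed in the header. The values of ``beta`` and
# ``eps`` below are assumptions chosen for illustration.
beta = 5
eps = 1e-6
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(label_im)
plt.show()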
| bsd-3-clause |
BhallaLab/moose-core | tests/support/test_hhfit.py | 2 | 6029 | # -*- coding: utf-8 -*-
# Author: Subha
# Maintainer: Dilawar Singh
# Created: Tue May 21 16:34:45 2013 (+0530)
# This test is fragile.
from __future__ import print_function, division, absolute_import
import numpy as np
import unittest
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import moose.neuroml2.hhfit as hhfit
np.random.seed(10)
class TestFindRateFn(unittest.TestCase):
def setUp(self):
self.vmin = -120e-3
self.vmax = 40e-3
self.vdivs = 640
self.v_array = np.linspace(self.vmin, self.vmax, self.vdivs + 1)
# Parameters for sigmoid function - from traub2005, NaF->m_inf
p_sigmoid = (1.0, 1 / -10e-3, -38e-3, 0.0)
self.sigmoid = p_sigmoid[0] / (
1.0 + np.exp(p_sigmoid[1] *
(self.v_array - p_sigmoid[2]))) + p_sigmoid[3]
self.p_sigmoid = p_sigmoid
# Parameters for exponential function - from traub2005, KC->n_inf
p_exp = (2e3, 1 / -27e-3, -53.5e-3, 0.0)
self.exp = p_exp[0] * np.exp(p_exp[1] *
(self.v_array - p_exp[2])) + p_exp[3]
self.p_exp = p_exp
# Parameters for linoid function: alpha_n from original Hodgkin-Huxley K channel.
p_linoid = (-0.01 * 1e3, -1 / 10e-3, 10e-3, 0.0)
self.linoid = p_linoid[3] + p_linoid[0] * \
(self.v_array - p_linoid[2]) / (np.exp(p_linoid[1] * (self.v_array - p_linoid[2])) - 1)
self.p_linoid = p_linoid
# This is tau_m of transient Ca2+ current (eq. 7) from
# Huguenard and McCormick, J Neurophysiol, 68:1373-1383,
# 1992.;
#1e-3 * (0.612 + 1 / (np.exp((self.v_array*1e3 + 132)/-16.7) + np.exp((self.v_array*1e3 + 16.8)/18.2)))
p_dblexp = (1e-3, -1 / 16.7e-3, -132e-3, 1 / 18.2e-3, -16.8e-3,
0.612e-3)
self.dblexp = p_dblexp[5] + p_dblexp[0] / (
np.exp(p_dblexp[1] * (self.v_array - p_dblexp[2])) +
np.exp(p_dblexp[3] * (self.v_array - p_dblexp[4])))
self.p_dblexp = p_dblexp
def test_sigmoid(self):
print('Testing sigmoid')
fn, params = hhfit.find_ratefn(self.v_array, self.sigmoid)
print('Sigmoid params original:', self.p_sigmoid, 'detected:', params)
self.assertEqual(hhfit.sigmoid, fn)
rms_error = np.sqrt(
np.mean((self.sigmoid - fn(self.v_array, *params))**2))
self.assertAlmostEqual(rms_error / max(abs(self.sigmoid)),
0.0,
places=3)
plt.plot(self.v_array, self.sigmoid, 'y-', self.v_array,
hhfit.sigmoid(self.v_array, *self.p_sigmoid), 'b--',
self.v_array, fn(self.v_array, *params), 'r-.')
plt.legend('original sigmoid %s fitted %s' % (self.p_sigmoid, fn))
plt.savefig("__test_sigmoid.png")
def test_exponential(self):
print('Testing exponential')
fn, params = hhfit.find_ratefn(self.v_array, self.exp)
print('Exponential params original:', self.p_exp, 'detected:', params)
if params is not None:
            # `find_ratefn` might sometimes return a parameter array for a
            # different function. exponential takes only up to 5 parameters.
fnval = hhfit.exponential(self.v_array, *params[:4])
self.assertEqual(hhfit.exponential, fn)
# The same exponential can be satisfied by an infinite number
# of parameter values. Hence we cannot compare the parameters,
# but only the fit
rms_error = np.sqrt(np.sum((self.exp - fnval)**2))
print(rms_error, rms_error / max(self.exp))
self.assertAlmostEqual(rms_error / max(self.exp), 0.0, places=3)
plt.plot(self.v_array, self.exp, 'y-', self.v_array,
hhfit.exponential(self.v_array, *self.p_exp), 'b--',
self.v_array, fnval, 'r-.')
plt.legend('original exp %s fitted %s' % (self.p_exp, fn))
out = "__test_exponential.png"
plt.savefig(out)
            print('Plot is saved to %s' % out)
else:
print("[INFO ] Failed find a suitable approximation...")
def test_linoid(self):
print('Testing linoid')
fn, params = hhfit.find_ratefn(self.v_array, self.linoid)
if params is not None:
print('Linoid params original:', self.p_linoid, 'detected:', params)
self.assertEqual(hhfit.linoid, fn)
fnval = fn(self.v_array, *params)
rms_error = np.sqrt(np.mean((self.linoid - fnval)**2))
self.assertAlmostEqual(rms_error / max(self.linoid), 0.0, places=3)
plt.plot(self.v_array, self.linoid, 'y-', self.v_array,
hhfit.linoid(self.v_array, *self.p_linoid), 'b--',
self.v_array, fn(self.v_array, *params), 'r-.')
plt.legend('Original linoid %s fitted %s' % (self.p_linoid, fn))
out = "__test_linoid.png"
plt.savefig(out)
            print('Plot is saved to %s' % out)
else:
print('Failed to find a suitable fit.')
def test_dblexponential(self):
print('Testing double exponential')
fn, params = hhfit.find_ratefn(self.v_array, self.dblexp)
fnval = fn(self.v_array, *params)
plt.plot(self.v_array, self.dblexp, 'y-', self.v_array,
hhfit.double_exp(self.v_array, *self.p_dblexp), 'b--',
self.v_array, fnval, 'r-.')
self.assertEqual(hhfit.double_exp, fn)
rms_error = np.sqrt(np.mean((self.dblexp - fnval)**2))
print(params, rms_error)
self.assertAlmostEqual(rms_error / max(self.dblexp), 0.0, places=3)
plt.legend('Original dblexp %s, fitted %s' %(self.dblexp, fn))
out = "__test_dblexponential.png"
plt.savefig(out)
        print('Plot is saved to %s' % out)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
irhete/predictive-monitoring-benchmark | preprocessing/preprocess_logs_hospital_billing.py | 1 | 5225 | import pandas as pd
import numpy as np
import os
import sys
input_data_folder = "../orig_logs"
output_data_folder = "../labeled_logs_csv_processed"
in_filename = "Hospital Billing - Event Log.csv"
case_id_col = "Case ID"
activity_col = "Activity"
timestamp_col = "Complete Timestamp"
label_col = "label"
pos_label = "deviant"
neg_label = "regular"
category_freq_threshold = 10
# features for classifier
dynamic_cat_cols = ["Activity", 'Resource', 'actOrange', 'actRed', 'blocked', 'caseType', 'diagnosis', 'flagC', 'flagD', 'msgCode', 'msgType', 'state', 'version', 'isCancelled', 'isClosed', 'closeCode']
static_cat_cols = ['speciality']
dynamic_num_cols = ['msgCount']
static_num_cols = []
static_cols = static_cat_cols + static_num_cols + [case_id_col]
dynamic_cols = dynamic_cat_cols + dynamic_num_cols + [timestamp_col]
cat_cols = dynamic_cat_cols + static_cat_cols
def extract_timestamp_features(group):
group = group.sort_values(timestamp_col, ascending=False, kind='mergesort')
tmp = group[timestamp_col] - group[timestamp_col].shift(-1)
tmp = tmp.fillna(0)
group["timesincelastevent"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
tmp = group[timestamp_col] - group[timestamp_col].iloc[-1]
tmp = tmp.fillna(0)
group["timesincecasestart"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
group = group.sort_values(timestamp_col, ascending=True, kind='mergesort')
group["event_nr"] = range(1, len(group) + 1)
return group
def check_if_activity_exists(group, activity, cut_from_idx=True):
relevant_activity_idxs = np.where(group[activity_col] == activity)[0]
if len(relevant_activity_idxs) > 0:
idx = relevant_activity_idxs[0]
group[label_col] = pos_label
if cut_from_idx:
return group[:idx]
else:
return group
else:
group[label_col] = neg_label
return group
def check_if_attribute_exists(group, attribute, cut_from_idx=True):
group[label_col] = neg_label if True in list(group[attribute]) else pos_label
relevant_idxs = np.where(group[attribute]==True)[0]
if len(relevant_idxs) > 0:
cut_idx = relevant_idxs[0]
if cut_from_idx:
            return group[:cut_idx]
else:
return group
else:
return group
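# NOTE: ``check_if_any_of_activities_exist`` is used below to drop incomplete
# cases but is not defined in this file. A minimal sketch, inferred only from
# how it is called (a case counts as complete when at least one of the given
# activities occurs in it), is provided here; treat it as an assumption about
# the original helper.
def check_if_any_of_activities_exist(group, activities):
    return group[activity_col].isin(activities).any()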
data = pd.read_csv(os.path.join(input_data_folder, in_filename), sep=";")
data[case_id_col] = data[case_id_col].fillna("missing_caseid")
data.rename(columns=lambda x: x.replace('(case) ', ''), inplace=True)
# remove incomplete cases
tmp = data.groupby(case_id_col).apply(check_if_any_of_activities_exist, activities=["BILLED", "DELETE", "FIN"])
incomplete_cases = tmp.index[tmp==False]
data = data[~data[case_id_col].isin(incomplete_cases)]
del tmp
data = data[static_cols + dynamic_cols]
# add features extracted from timestamp
data[timestamp_col] = pd.to_datetime(data[timestamp_col])
data["timesincemidnight"] = data[timestamp_col].dt.hour * 60 + data[timestamp_col].dt.minute
data["month"] = data[timestamp_col].dt.month
data["weekday"] = data[timestamp_col].dt.weekday
data["hour"] = data[timestamp_col].dt.hour
data = data.groupby(case_id_col).apply(extract_timestamp_features)
# add inter-case features
print("Extracting open cases...")
sys.stdout.flush()
data = data.sort_values([timestamp_col], ascending=True, kind='mergesort')
dt_first_last_timestamps = data.groupby(case_id_col)[timestamp_col].agg([min, max])
dt_first_last_timestamps.columns = ["start_time", "end_time"]
case_end_times = dt_first_last_timestamps.to_dict()["end_time"]
data["open_cases"] = 0
case_dict_state = {}
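# ``case_dict_state`` holds the cases that have started but not yet finished;
# iterating over events in timestamp order, the number of open cases recorded
# for an event is the size of this dict just before the event's own case is
# added or removed.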
for idx, row in data.iterrows():
case = row[case_id_col]
current_ts = row[timestamp_col]
# save the state
data.set_value(idx, 'open_cases', len(case_dict_state))
if current_ts >= case_end_times[case]:
if case in case_dict_state:
del case_dict_state[case]
else:
case_dict_state[case] = 1
print("Imputing missing values...")
sys.stdout.flush()
# impute missing values
grouped = data.sort_values(timestamp_col, ascending=True, kind='mergesort').groupby(case_id_col)
for col in static_cols + dynamic_cols:
data[col] = grouped[col].transform(lambda grp: grp.fillna(method='ffill'))
data[cat_cols] = data[cat_cols].fillna('missing')
data = data.fillna(0)
# set infrequent factor levels to "other"
for col in cat_cols:
counts = data[col].value_counts()
mask = data[col].isin(counts[counts >= category_freq_threshold].index)
data.loc[~mask, col] = "other"
data = data.sort_values(timestamp_col, ascending=True, kind="mergesort")
# second labeling
dt_labeled = data.groupby(case_id_col).apply(check_if_attribute_exists, attribute="isClosed", cut_from_idx=False)
dt_labeled.drop(['isClosed'], axis=1).to_csv(os.path.join(output_data_folder, "hospital_billing_2.csv"), sep=";", index=False)
del dt_labeled
# third labeling
dt_labeled = data.groupby(case_id_col).apply(check_if_activity_exists, activity="REOPEN", cut_from_idx=True)
dt_labeled.to_csv(os.path.join(output_data_folder, "hospital_billing_3.csv"), sep=";", index=False)
del dt_labeled
| apache-2.0 |
madjelan/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict with an 'estimators' entry, used to label the boxes
        with estimator names and complexities
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
ronalcc/zipline | tests/test_rolling_panel.py | 20 | 7005 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import with_environment
class TestRollingPanel(unittest.TestCase):
@with_environment()
def test_alignment(self, env):
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
@with_environment()
def test_get_current_multiple_call_same_tick(self, env):
"""
        In the old get_current, each call to get_current would copy the data,
        so changing the returned object had no side effects.
        To keep the same API, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
        # each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
        # make sure raw follows the same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existant = max(n_items, n_minor) + 1
# We want to add new columns with random order
add_items = np.arange(first_non_existant, first_non_existant + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
| apache-2.0 |