repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
pratapvardhan/pandas | pandas/tests/io/msgpack/test_read_size.py | 22 | 1870 |
"""Test Unpacker's read_array_header and read_map_header methods"""
from pandas.io.msgpack import packb, Unpacker, OutOfData
UnexpectedTypeException = ValueError
def test_read_array_header():
unpacker = Unpacker()
unpacker.feed(packb(['a', 'b', 'c']))
assert unpacker.read_array_header() == 3
assert unpacker.unpack() == b'a'
assert unpacker.unpack() == b'b'
assert unpacker.unpack() == b'c'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_read_map_header():
unpacker = Unpacker()
unpacker.feed(packb({'a': 'A'}))
assert unpacker.read_map_header() == 1
assert unpacker.unpack() == B'a'
assert unpacker.unpack() == B'A'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_incorrect_type_array():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_map():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_correct_type_nested_array():
unpacker = Unpacker()
unpacker.feed(packb({'a': ['b', 'c', 'd']}))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_nested_map():
unpacker = Unpacker()
unpacker.feed(packb([{'a': 'b'}]))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
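

# A minimal usage sketch (an addition for illustration, not part of the original
# test file): the header-reading API exercised above allows streaming over a
# packed array without materializing it as a Python list first. Only objects
# already imported above (packb, Unpacker) are used; the helper name is
# illustrative.
def stream_array_elements(packed_bytes):
    unpacker = Unpacker()
    unpacker.feed(packed_bytes)
    n = unpacker.read_array_header()  # element count announced by the header
    for _ in range(n):
        yield unpacker.unpack()       # decode one element at a time

# e.g. list(stream_array_elements(packb(['a', 'b', 'c']))) == [b'a', b'b', b'c']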
| bsd-3-clause
HoerTech-gGmbH/openMHA | mha/doc/flowcharts/dc_simple_in_out.py | 1 | 2870 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the HörTech Open Master Hearing Aid (openMHA)
# Copyright © 2018 2020 HörTech gGmbH
#
# openMHA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# openMHA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License, version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public License,
# version 3 along with openMHA. If not, see <http://www.gnu.org/licenses/>.
# This python file recreates the plot stored in file dc_simple_in_out.png.
# It is not executed automatically during build system builds because it needs
# python-matplotlib installed on the build agents. To avoid creating more
# build dependencies, the generated .png file is also stored in git.
from matplotlib import pyplot as plt
import numpy as np
from math import atan2,atan
def main():
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    plt.xticks([20,50,80])
    plt.yticks([50, 70, 80, 90, 100])
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    ax.set_yticklabels(['50', '', '80', '', 'MPO'])
    ax.set_xticklabels(['noise gate','50','80'])
    plt.ylabel(r'$\mathrm{L_{Out}}\,$/dB',size=14)
    plt.xlabel(r'$\mathrm{L_{In}}\,$/dB',size=14)
    ax.set_ylim([0,110])
    ax.set_xlim([0,110])
    x0=[0,20]
    y0=[110/3, 50]
    x1=[15, 20, 50, 80,95, 120 ]
    y1=[0, 50, 70, 90, 100, 100 ]
    x2=[0,x1[-1]]
    y2=x2
    plt.vlines([50], 0, 70,linestyles='dotted')
    plt.vlines([80], 0, 90,linestyles='dotted')
    plt.hlines([50], 0, 50,linestyles='dotted')
    plt.hlines([70], 0, 50,linestyles='dotted')
    plt.hlines([80], 0, 80,linestyles='dotted')
    plt.hlines([90], 0, 80,linestyles='dotted')
    plt.hlines([100], 0, 120,linestyles='dotted')
    plt.vlines([20], 0, 50,linestyles='dotted')
    points=np.array((15,25)).reshape((1,2))
    trans_angle=plt.gca().transData.transform_angles(np.array((atan2(50,5)*180/3.1415,)),points)[0]
    plt.annotate("expansion", xy=(19.5,45),va='top',ha='right',rotation=trans_angle)
    plt.annotate("", xy=(15,50), xytext=(15,70),arrowprops=dict(arrowstyle='<->'))
    plt.annotate("G50",xy=(14,60),va='center',ha='right')
    plt.annotate("", xy=(25,80), xytext=(25,90),arrowprops=dict(arrowstyle='<->'))
    plt.annotate("G80",xy=(24,85),va='center',ha='right')
    plt.plot(x0,y0,linestyle='dotted',color='black')
    plt.plot(x1,y1)
    plt.plot(x2,y2,color='black')
    plt.tight_layout()
    plt.savefig("dc_simple_in_out.png")
    plt.show()

with plt.style.context(u'seaborn-paper'):
    main()
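
# Worked reading of the breakpoints above (an added clarification, not in the
# original script): the plotted curve maps 50 dB in -> 70 dB out and
# 80 dB in -> 90 dB out, i.e. the illustrated fitting uses
#   G50 = 70 - 50 = 20 dB   and   G80 = 90 - 80 = 10 dB,
# which is exactly what the two double-headed "G50" / "G80" arrows annotate.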
| agpl-3.0
GuessWhoSamFoo/pandas | pandas/tests/series/test_constructors.py | 1 | 46677 |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
from numpy import nan
import numpy.ma as ma
import pytest
from pandas._libs import lib
from pandas._libs.tslib import iNaT
from pandas.compat import PY36, long, lrange, range, zip
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64tz_dtype)
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
Timestamp, date_range, isna, period_range, timedelta_range)
from pandas.api.types import CategoricalDtype
from pandas.core.arrays import period_array
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
class TestSeriesConstructors():
def test_invalid_dtype(self):
# GH15520
msg = 'not understood'
invalid_list = [pd.Timestamp, 'pd.Timestamp', list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name='time', dtype=dtype)
def test_scalar_conversion(self):
# Passing in a scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.])) == 1.0
assert int(Series([1.])) == 1
assert long(Series([1.])) == 1
def test_constructor(self, datetime_series, empty_series):
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
assert not Series({}).index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize('input_class', [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype='float64')
empty2 = Series(input_class(), dtype='float64')
assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype='category')
empty2 = Series(input_class(), dtype='category')
assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
empty = Series(index=lrange(10))
empty2 = Series(input_class(), index=lrange(10))
assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=lrange(10))
empty2 = Series(input_class(), index=lrange(10), dtype='float64')
assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series('', dtype=str, index=range(3))
empty2 = Series('', index=range(3))
assert_series_equal(empty, empty2)
@pytest.mark.parametrize('input_arg', [np.nan, float('nan')])
def test_constructor_nan(self, input_arg):
empty = Series(dtype='float64', index=lrange(10))
empty2 = Series(input_arg, index=lrange(10))
assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize('dtype', [
'f8', 'i8', 'M8[ns]', 'm8[ns]', 'category', 'object',
'datetime64[ns, UTC]',
])
@pytest.mark.parametrize('index', [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
result = pd.Series(index=['b', 'a', 'c'])
assert result.index.tolist() == ['b', 'a', 'c']
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize('item', ['entry', 'ѐ', 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(['x', None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(['x', np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype='int64')
result = Series(Iter(), dtype='int64')
assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype='int64')
result = Series(range(10), dtype='int64')
assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(['abc'])
result = Series('abc')
assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype='int64')
for obj in [[1, 2, 3], (1, 2, 3),
np.array([1, 2, 3], dtype='int64')]:
result = Series(obj, index=[0, 1, 2])
assert_series_equal(result, expected)
@pytest.mark.parametrize('input_vals', [
([1, 2]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(['1.0', '2.0', np.nan], dtype=object)
assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]),
dtype='int64')
expected = pd.Series([1, 2, 3], dtype='int64')
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype='category')
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype='category')
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
# test basic creation / coercion of categoricals
s = Series(factor, name='A')
assert s.dtype == 'category'
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == 'B'
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(['a', 'b'],
dtype=CategoricalDtype(['a', 'b', 'c'],
ordered=True))
assert is_categorical_dtype(result) is True
tm.assert_index_equal(result.cat.categories, pd.Index(['a', 'b', 'c']))
assert result.cat.ordered
result = pd.Series(['a', 'b'], dtype=CategoricalDtype(['b', 'a']))
assert is_categorical_dtype(result)
tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a']))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series('a', index=[0, 1],
dtype=CategoricalDtype(['a', 'b'], ordered=True))
expected = Series(['a', 'a'], index=[0, 1],
dtype=CategoricalDtype(['a', 'b'], ordered=True))
tm.assert_series_equal(result, expected, check_categorical=True)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(['a', 'b', 'c'],
dtype=CategoricalDtype(['a', 'b']))
right = pd.Series(pd.Categorical(['a', 'b', np.nan],
categories=['a', 'b']))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3, ), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype='M8[ns]')
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3, ), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([nan, nan, nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize('input', [[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(['a', 'b', 'a']),
(i for i in range(3)),
map(lambda x: x, range(3))])
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = 'Length of passed values is 3, index implies 4'
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype='int64')
expected = Series(100, index=np.arange(4), dtype='int64')
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(['foo'], index=['a', 'b', 'c'])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
assert s.dtype == np.dtype('i8')
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
assert s.dtype == np.dtype('f8')
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.], np.array([1.])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to the original do not affect the copy
x[0] = 2.
assert not x.equals(y)
assert x[0] == 2.
assert y[0] == 1.
@pytest.mark.parametrize(
"index",
[
pd.date_range('20170101', periods=3, tz='US/Eastern'),
pd.date_range('20170101', periods=3),
pd.timedelta_range('1 day', periods=3),
pd.period_range('2012Q1', periods=3, freq='Q'),
pd.Index(list('abc')),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3)],
ids=lambda x: type(x).__name__)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._data.blocks[0].values is not index
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
assert s.dtype == np.float64
s = Series(None, index=lrange(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = 'Trying to coerce negative values to unsigned integers'
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
assert s.iloc[0] == Timestamp('20130101')
assert s.iloc[1] == 'NOV'
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = '216 3T19'.split()
wing1 = '2T15 4H19'.split()
wing2 = '416 4T20'.split()
mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
df = pd.DataFrame(
{'wing1': wing1,
'wing2': wing2,
'mat': mat}, index=belly)
result = df.loc['3T19']
assert result.dtype == object
result = df.loc['216']
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = Series(arr)
assert result.dtype == 'M8[ns]'
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=lrange(5))
assert not isna(s).all()
s = Series(nan, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == 'M8[ns]'
s.iloc[0] = np.nan
assert s.dtype == 'M8[ns]'
# GH3414 related
# msg = (r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
# r" \[int32\]")
# with pytest.raises(TypeError, match=msg):
# Series(Series(dates).astype('int') / 1000000, dtype='M8[ms]')
pytest.raises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
msg = (r"The 'datetime64' dtype has no unit\. Please pass in"
r" 'datetime64\[ns\]' instead\.")
with pytest.raises(ValueError, match=msg):
Series(dates, dtype='datetime64')
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp('20130101'), 1], index=['a', 'b'])
assert result['a'] == Timestamp('20130101')
assert result['b'] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, index=dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, index=dates, dtype=object)
assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
assert str(Series(dr).iloc[0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
assert str(Series(dr).iloc[0].tz) == 'US/Eastern'
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == 'object'
assert s[2] is np.nan
assert 'NaN' in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr)
assert s.dtype.name == 'datetime64[ns, US/Eastern]'
assert s.dtype == 'datetime64[ns, US/Eastern]'
assert is_datetime64tz_dtype(s.dtype)
assert 'datetime64[ns, US/Eastern]' in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == 'datetime64[ns]'
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize('UTC').tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[Series([True, True, False], index=s.index)]
assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
assert_series_equal(result, s)
# short str
assert 'datetime64[ns, US/Eastern]' in str(s)
# formatting with NaT
result = s.shift()
assert 'datetime64[ns, US/Eastern]' in str(result)
assert 'NaT' in str(result)
# long str
t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
assert 'datetime64[ns, US/Eastern]' in str(t)
result = pd.DatetimeIndex(s, freq='infer')
tm.assert_index_equal(result, dr)
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
assert s.dtype == 'datetime64[ns, US/Pacific]'
assert lib.infer_dtype(s, skipna=True) == 'datetime64'
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
assert s.dtype == 'object'
assert lib.infer_dtype(s, skipna=True) == 'datetime'
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern'))
assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('arg',
['2013-01-01 00:00:00', pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype='datetime64[ns, CET]')
expected = Series(pd.Timestamp(arg)).dt.tz_localize('CET')
assert_series_equal(result, expected)
def test_construction_interval(self):
# construction from interval & array of intervals
index = IntervalIndex.from_breaks(np.arange(3), closed='right')
result = Series(index)
repr(result)
str(result)
tm.assert_index_equal(Index(result.values), index)
result = Series(index.values)
tm.assert_index_equal(Index(result.values), index)
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert('UTC'), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
def test_constructor_infer_period(self):
data = [pd.Period('2000', 'D'), pd.Period('2001', 'D'), None]
result = pd.Series(data)
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == 'Period[D]'
data = np.asarray(data, dtype=object)
tm.assert_series_equal(result, expected)
assert result.dtype == 'Period[D]'
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period('2000', 'D'), pd.Period('2001', 'A')]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101', periods=5, freq='D')
s = Series(pi)
assert s.dtype == 'Period[D]'
expected = Series(pi.astype(object))
assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.iloc[0] = 0
expected.iloc[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {'b': 1, 'a': 0, 'c': 2}
result = Series(d)
if PY36:
expected = Series([1, 0, 2], index=list('bac'))
else:
expected = Series([0, 1, 2], index=list('abc'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: 'a', value: 'b', float('nan'): 'c', 4: 'd'}
result = Series(d).sort_values()
expected = Series(['a', 'b', 'c', 'd'], index=[1, value, np.nan, 4])
assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): 'a', (2, np.nan): 'b', (3, value): 'c'}
result = Series(d).sort_values()
expected = Series(['a', 'b', 'c'],
index=Index([(1, 1), (2, np.nan), (3, value)]))
assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
assert_series_equal(result_datetime64, expected)
assert_series_equal(result_datetime, expected)
assert_series_equal(result_Timestamp, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3,
(None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6],
index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
assert tm.is_sorted(series.index)
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
assert series.dtype == np.object_
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series('foo', index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == 'M8[ns]'
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == 'category'
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1)])
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(
1, 's')])
assert td.dtype == 'timedelta64[ns]'
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
assert td.dtype == 'timedelta64[ns]'
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == 'timedelta64[ns]'
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == 'object'
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == 'timedelta64[ns]'
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == 'timedelta64[ns]'
td = Series([np.timedelta64(1, 's')])
assert td.dtype == 'timedelta64[ns]'
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# pytest.raises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
msg = (r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[int32\]")
with pytest.raises(TypeError, match=msg):
td.astype('int32')
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
assert td.dtype == 'object'
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([np.nan, pd.NaT, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, None, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
s = Series([pd.NaT, np.nan, '1 Day'])
assert s.dtype == 'timedelta64[ns]'
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp('20130101'),
Timestamp('20130101', tz='US/Eastern')])
expected = Series([Timestamp('20130101'),
Timestamp('20130101', tz='US/Eastern')],
dtype='object')
assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype('M8[ns]')
expected = Series([NaT])
assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777., 'name', datetime(2001, 11, 11), (1, ), u"\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {'a': 0, 'b': 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [['name_list'], np.ones(2), {1: 2}]:
for data in [['name_list'], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
assert series.dtype == 'M8[ns]'
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
s = Series(arr)
expected = Series(pd.timedelta_range('00:00:01', periods=3, freq='s'))
assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
s = Series(np.array(['2013-01-01', '2013-01-02',
'2013-01-03'], dtype='datetime64[D]'))
assert_series_equal(s, Series(date_range('20130101', periods=3,
freq='D')))
# s = Series(np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
# assert_series_equal(s,date_range('20130101
# 00:00:01',period=3,freq='s'))
@pytest.mark.parametrize(
"index",
[
date_range('1/1/2000', periods=10),
timedelta_range('1 day', periods=10),
period_range('2000-Q1', periods=10, freq='Q')],
ids=lambda x: type(x).__name__)
def test_constructor_cant_cast_datetimelike(self, index):
# floats are not ok
msg = "Cannot cast {}.*? to ".format(
# strip Index to convert PeriodIndex -> Period
# We don't care whether the error message says
# PeriodIndex or PeriodArray
type(index).__name__.rstrip("Index")
)
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(index, dtype=np.int64)
expected = Series(index.astype(np.int64))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
date_range('1/1/2000', periods=10),
timedelta_range('1 day', periods=10),
period_range('2000-Q1', periods=10, freq='Q')],
ids=lambda x: type(x).__name__)
def test_constructor_cast_object(self, index):
s = Series(index, dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(pd.Index(index, dtype=object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(index.astype(object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
@pytest.mark.parametrize("dtype", [
np.datetime64,
np.timedelta64,
])
def test_constructor_generic_timestamp_no_frequency(self, dtype):
# see gh-15524, gh-15987
msg = "dtype has no unit. Please pass in"
with pytest.raises(ValueError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize("dtype,msg", [
("m8[ps]", "cannot convert timedeltalike"),
("M8[ps]", "cannot convert datetimelike"),
])
def test_constructor_generic_timestamp_bad_frequency(self, dtype, msg):
# see gh-15524, gh-15987
with pytest.raises(TypeError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = Series([0, 1, 2, 3, 4], dtype=dtype or 'int64')
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_constructor_tz_mixed_data(self):
# GH 13051
dt_list = [Timestamp('2016-05-01 02:03:37'),
Timestamp('2016-04-30 19:03:37-0700', tz='US/Pacific')]
result = Series(dt_list)
expected = Series(dt_list, dtype=object)
tm.assert_series_equal(result, expected)
| bsd-3-clause
TheWylieStCoyote/gnuradio | gr-utils/plot_tools/plot_data.py | 3 | 7076 |
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Utility to help plotting data from files.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from argparse import ArgumentParser
import numpy
try:
    from pylab import Button, connect, draw, figure, figtext, get_current_fig_manager, show, plot, rcParams
except ImportError:
    print("Please install Python Matplotlib (http://matplotlib.sourceforge.net/) and Python TkInter https://wiki.python.org/moin/TkInter to run this script")
    raise SystemExit(1)

datatype_lookup = {
    "complex64": numpy.complex64,
    "float32": numpy.float32,
    "uint32": numpy.uint32,
    "int32": numpy.int32,
    "uint16": numpy.uint16,
    "int16": numpy.int16,
    "uint8": numpy.uint8,
    "int8": numpy.int8,
}
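
# Clarifying note (an addition, not part of the original GNU Radio file):
# sizeof_data in plot_data.__init__ below is derived from this table by
# instantiating the dtype, e.g. datatype_lookup["complex64"]().nbytes == 8,
# so every seek()/tell() computation in the class works in units of whole samples.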
class plot_data(object):
    def __init__(self, datatype, filenames, options):
        self.hfile = list()
        self.legend_text = list()
        for f in filenames:
            self.hfile.append(open(f, "r"))
            self.legend_text.append(f)

        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate

        self.datatype = datatype
        if self.datatype is None:
            self.datatype = datatype_lookup[options.data_type]
        self.sizeof_data = self.datatype().nbytes  # number of bytes per sample in file

        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22

        # Setup PLOT
        self.fig = figure(1, figsize=(16, 9), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size

        self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()

        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)

        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)

        self.xlim = self.sp_f.get_xlim()

        self.manager = get_current_fig_manager()
        connect('key_press_event', self.click)
        show()

    def get_data(self, hfile):
        self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
        try:
            f = numpy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
        except MemoryError:
            print("End of File")
        else:
            self.f = numpy.array(f)
            self.time = numpy.array([i*(1 / self.sample_rate) for i in range(len(self.f))])

    def make_plots(self):
        self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
        self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        self.plot_f = list()

        maxval = -1e12
        minval = 1e12
        for hf in self.hfile:
            # if specified on the command-line, set file pointer
            hf.seek(self.sizeof_data*self.start, 1)
            self.get_data(hf)
            # Subplot for real and imaginary parts of signal
            self.plot_f += plot(self.time, self.f, 'o-')
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())

        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
        draw()

    def update_plots(self):
        maxval = -1e12
        minval = 1e12
        for hf,p in zip(self.hfile,self.plot_f):
            self.get_data(hf)
            p.set_data([self.time, self.f])
            maxval = max(maxval, self.f.max())
            minval = min(minval, self.f.min())

        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        draw()

    def click(self, event):
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]
        if(find(event.key, forward_valid_keys)):
            self.step_forward()
        elif(find(event.key, backward_valid_keys)):
            self.step_backward()

    def button_left_click(self, event):
        self.step_backward()

    def button_right_click(self, event):
        self.step_forward()

    def step_forward(self):
        self.update_plots()

    def step_backward(self):
        for hf in self.hfile:
            # Step back in file position
            if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
                hf.seek(-2*self.sizeof_data*self.block_length, 1)
            else:
                hf.seek(-hf.tell(),1)
        self.update_plots()
    @staticmethod
    def setup_options():
        description = "Takes a GNU Radio binary file and displays the samples versus time. You can set the block size to specify how many points to read in at a time and the start position in the file. By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
        parser = ArgumentParser(conflict_handler="resolve", description=description)
        parser.add_argument("-d", "--data-type", default="complex64",
                            choices=("complex64", "float32", "uint32", "int32", "uint16",
                                     "int16", "uint8", "int8"),
                            help="Specify the data type [default=%(default)r]")
        parser.add_argument("-B", "--block", type=int, default=1000,
                            help="Specify the block size [default=%(default)r]")
        parser.add_argument("-s", "--start", type=int, default=0,
                            help="Specify where to start in the file [default=%(default)r]")
        parser.add_argument("-R", "--sample-rate", type=float, default=1.0,
                            help="Set the sample rate of the data [default=%(default)r]")
        parser.add_argument("files", metavar="FILE", nargs='+',
                            help="Input file with samples")
        return parser
def find(item_in, list_search):
    try:
        return list_search.index(item_in) is not None
    except ValueError:
        return False
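

# Minimal driver sketch (an illustrative addition, not part of the original
# module): the gr_plot front-end scripts normally do this wiring, but something
# equivalent to the following exercises the class directly. Passing datatype=None
# makes the constructor resolve the dtype from the --data-type option via
# datatype_lookup, as shown in __init__ above.
def _example_main():
    parser = plot_data.setup_options()
    options = parser.parse_args()
    plot_data(None, options.files, options)  # blocks in show() until the window closes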
| gpl-3.0
bundgus/python-playground | matplotlib-playground/examples/api/patch_collection.py | 1 | 1269 |
import numpy as np
import matplotlib
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
resolution = 50 # the number of vertices
N = 3
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1*np.random.rand(N)
patches = []
for x1, y1, r in zip(x, y, radii):
    circle = Circle((x1, y1), r)
    patches.append(circle)
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1*np.random.rand(N)
theta1 = 360.0*np.random.rand(N)
theta2 = 360.0*np.random.rand(N)
for x1, y1, r, t1, t2 in zip(x, y, radii, theta1, theta2):
    wedge = Wedge((x1, y1), r, t1, t2)
    patches.append(wedge)
# Some limiting conditions on Wedge
patches += [
Wedge((.3, .7), .1, 0, 360), # Full circle
Wedge((.7, .8), .2, 0, 360, width=0.05), # Full ring
Wedge((.8, .3), .2, 0, 45), # Full sector
Wedge((.8, .3), .2, 45, 90, width=0.10), # Ring sector
]
for i in range(N):
    polygon = Polygon(np.random.rand(N, 2), True)
    patches.append(polygon)
colors = 100*np.random.rand(len(patches))
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(np.array(colors))
ax.add_collection(p)
plt.colorbar(p)
plt.show()
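
# Variation sketch (an added illustration; uses only documented matplotlib API):
# instead of mapping scalar values through a colormap with set_array(), each patch
# can keep its own facecolor by building the collection with match_original=True.
# The extra figure below is purely illustrative.
fig2, ax2 = plt.subplots()
own_color_patches = [Circle((0.3, 0.5), 0.1, facecolor='C0', alpha=0.4),
                     Circle((0.7, 0.5), 0.1, facecolor='C1', alpha=0.4)]
ax2.add_collection(PatchCollection(own_color_patches, match_original=True))
plt.show()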
| mit
pythonvietnam/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 3 | 37680 |
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged predictions eventually give
# the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with a column-vector y of shape (n_samples, 1).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises when subsample == 1.0.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
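# (with verbose=1 a progress line is printed for each of the first 10
# iterations and then for every 10th iteration, so 100 estimators give
# 10 + 9 = 19 lines after the header)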
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
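# the monitor returns True at i == 9, so fitting stops after 10 estimators
# even though n_estimators=20 was requested (verified below)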
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
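# for the exponential loss the positive-class probability equals the logistic
# sigmoid of twice the decision function; the next assertion checks exactly that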
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
|
bsd-3-clause
|
xidus/ted
|
ted/sdss/cas.py
|
1
|
34382
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Version: Sat 27 Apr 2013
# Initial build.
#
"""
Scripts for dealing with the SDSS
Catalogue Archive Server (CAS) and
the results obtained herefrom.
"""
import os
import sys
import time
# Will mechanize work with proxies?
# Maybe I should create a wrapper or use socksipy to have all connections use the proxy server.
import mechanize
# .. : ted
from .. import env
from ..time import format_HHMMSS_diff
from . import load_SNe_candidate_list
_path_data = env.paths.get('data')
_path_das = env.paths.get('das')
_path_cas = env.paths.get('cas')
_proxies = env.proxies
"""
Content
-------
__Functions:__
* get_galaxies
* create_galaxy_list
* plot_gxlist
* build_tlist
* get_fields
* get_field_result
* create_unique_field_list
* filter_invalid_from_unique_field_list
* count_field_records
* query
* field_clean_local_dir
"""
def get_galaxies():
"""
This query takes too long?
"""
ipath = env.paths.get('sql')
fn_sql_galaxies = os.path.join(ipath, 'cas', 'stripe82_galaxies.sql')
with open(fn_sql_galaxies, 'r') as fsock:
sql_galaxies = fsock.read()
print sql_galaxies
print ''
print 'Downloading galaxy objects from PhotoObjAll'
time_total_beg = time.time()
galaxies = query(sql_galaxies)
time_total_end = time.time()
print 'Query executed in {:.0f} seconds'.format(
time_total_end - time_total_beg
)
if 'error' in galaxies.lower():
print('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
print '\nContent of returned result:\n\n{}'.format(galaxies)
with open(env.files.get('galaxies'), 'w+') as fsock:
fsock.write(galaxies)
def create_galaxy_list():
"""
Create a list of galaxy coordinates `gxlist.csv` by
filtering the response from SDSS SkyServer in the following way:
0. Exclude duplicate coordinates.
1. Exclude coordinates that are not within the covered stripe area.
2. Exclude coordinates that are too close to any supernova coordinate.
3. Exclude coordinates that are too close to other non-events.
4. Exclude coordinates that are covered by too few fields.
Returns
-------
None
Side effects
------------
File `gxlist.csv` in env.paths.get('cas')
"""
from .cutouts import get_covering_fields
import numpy as np
import pandas as pd
# Manipulation and analysis of geometric objects in the Cartesian plane.
# from shapely.geometry import Polygon, Point
# from shapely.ops import cascaded_union
from IPython import embed
print 'Loading data ...'
cols = dict(usecols=['Ra', 'Dec'])
fcols = dict(usecols=['raMin', 'raMax', 'decMin', 'decMax'])
galaxies = pd.read_csv(env.files.get('galaxies'), sep=',', **cols)
snlist = pd.read_csv(env.files.get('snlist'), sep=';', **cols)
fields = pd.read_csv(env.files.get('fields'), sep=',', **fcols)
# Step 0
# ------
print 'Step 0 : Exclude duplicate coordinates ...'
gra = galaxies.Ra.values
gdec = galaxies.Dec.values
# coords = np.zeros_like(gra).astype(str)
coords = []
for i in np.arange(galaxies.shape[0]):
# coords[i] = '{},{}'.format(gra[i], gdec[i])
coords.append('{},{}'.format(gra[i], gdec[i]))
u_coords, uix = np.unique(coords, return_index=True)
print 'Reducing data for the next step ...'
u_galaxies = galaxies.iloc[uix]
u_gra = gra[uix]
u_gdec = gdec[uix]
# Step 1
# ------
print 'Step 1 : Exclude coordinates that are not within covered stripe area ...'
ipath = os.path.dirname(env.files.get('gxlist'))
ifname = os.path.join(ipath, 'cu_gxlist.csv')
if not os.path.isfile(ifname):
"""This step takes ~40 minutes on my laptop."""
ramin, ramax = fields.raMin.values, fields.raMax.values
decmin, decmax = fields.decMin.values, fields.decMax.values
N_gx = u_galaxies.shape[0]
N_fields = fields.shape[0]
# Work in chunks of <N_rows> rows at a time so the <N_rows>-by-N_fields arrays fit in memory
N_rows = 100
N_gx_eee = N_gx // N_rows
N_gx_rem = N_gx % N_rows
cix = np.array([]).astype(bool)
# Create vectors and matrices that are repeatedly used
N_fields_ZEROS = np.zeros(N_fields)[None, :]
RAMIN = np.zeros(N_rows)[:, None] + ramin[None, :]
RAMAX = np.zeros(N_rows)[:, None] + ramax[None, :]
DECMIN = np.zeros(N_rows)[:, None] + decmin[None, :]
DECMAX = np.zeros(N_rows)[:, None] + decmax[None, :]
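# Broadcasting the chunk of galaxy coordinates against the field limits yields
# N_rows-by-N_fields comparison matrices, so every galaxy in the chunk is tested
# against every field at once instead of looping in Python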
for n in range(N_gx_eee):
# How far are we?
beg = n * N_rows
end = (n + 1) * N_rows
print 'n = {: >4d}; {: >6d}; {: >6d};'.format(n, beg, end)
# Create matrices
GRA = u_gra[beg:end, None] + N_fields_ZEROS
GDEC = u_gdec[beg:end, None] + N_fields_ZEROS
CMP = np.ones((N_rows, N_fields)).astype(bool)
CMP &= (GRA > RAMIN)
CMP &= (GRA < RAMAX)
CMP &= (GDEC > DECMIN)
CMP &= (GDEC < DECMAX)
# Append the booleans to my master index file
cix = np.append(cix, np.any(CMP, axis=1))
# Clean up
del GRA, GDEC, CMP
if N_gx_rem > 0:
# Finally, the remaining less than <N_rows> coordinates
beg = (n + 1) * N_rows
end = beg + N_gx_rem
# Create matrices
GRA = u_gra[beg:end, None] + N_fields_ZEROS
GDEC = u_gdec[beg:end, None] + N_fields_ZEROS
RAMIN = np.zeros(N_gx_rem)[:, None] + ramin[None, :]
RAMAX = np.zeros(N_gx_rem)[:, None] + ramax[None, :]
DECMIN = np.zeros(N_gx_rem)[:, None] + decmin[None, :]
DECMAX = np.zeros(N_gx_rem)[:, None] + decmax[None, :]
CMP = np.ones((N_gx_rem, N_fields)).astype(bool)
CMP &= (GRA > RAMIN)
CMP &= (GRA < RAMAX)
CMP &= (GDEC > DECMIN)
CMP &= (GDEC < DECMAX)
# Append the booleans to my master index file
cix = np.append(cix, np.any(CMP, axis=1))
# Check
print ''
print 'N_gx =', N_gx
print 'cix.size =', cix.size
print 'cix.dtype =', cix.dtype
# Embed so that I do not need to re-do this step again...
embed()
print 'Reducing data for the next step ...'
cu_galaxies = u_galaxies.iloc[cix]
cu_gra = u_gra[cix]
cu_gdec = u_gdec[cix]
else:
print 'Step 1. already performed. Loading result ...'
cu_galaxies = pd.read_csv(ifname, sep=',')
cu_gra = cu_galaxies.Ra.values
cu_gdec = cu_galaxies.Dec.values
# Step 2
# ------
print 'Step 2 : Exclude coordinates that are too close to any supernova coordinate ...'
# Criteria?
# Unknown, but for now it should just lie
# outside the range of the cutout extent, i.e. more than 101 px away.
# 101 px in the SDSS frames correspond to about 10 ** -2 degrees.
criteria_distance = .001 # [deg]
# Count how many rows that are left at this step
N_gx = cu_galaxies.shape[0]
N_sn = snlist.shape[0]
# Work in chunks of <N_rows> rows at a time so the <N_rows>-by-N_sn arrays fit in memory
N_rows = 10000
N_gx_eee = N_gx // N_rows
N_gx_rem = N_gx % N_rows
dix = np.array([]).astype(bool)
# Create repeatedly used vectors
N_sn_ZEROS = np.zeros(N_sn)[None, :]
RA_sn = np.zeros(N_rows)[:, None] + snlist.Ra.values[None, :]
DEC_sn = np.zeros(N_rows)[:, None] + snlist.Dec.values[None, :]
print 'Creating matrices that can calculate all distances simultaneously ...'
# Loop for as many times as needed
for n in range(N_gx_eee):
beg = n * N_rows
end = (n + 1) * N_rows
# How far are we?
print 'n = {: >4d}; {: >6d}; {: >6d};'.format(n, beg, end)
# Create matrices
# Broadcast shapes to get a N_gx-by-N_sn
RA_gx = cu_gra[beg:end, None] + N_sn_ZEROS
DEC_gx = cu_gdec[beg:end, None] + N_sn_ZEROS
# print 'Calculating differences for each coordinate type ...'
# Differences
dRA = RA_gx - RA_sn
dDEC = DEC_gx - DEC_sn
# print 'Calculating the distances between every possible set of coordinates ...'
# Distances from each coordinate to each supernova
dS = np.sqrt(dRA ** 2 + dDEC ** 2)
# print 'Creating boolean vector for each coordinate ...'
# Are there any SNe too close for a given coordinate?
# Check along the columns, i.e .return boolean vector of rows (galaxies)
# that met the criteria of being far enough away that it should be outside
# a cutout which also has a known SDSS supernova candidate within it.
# distance indices: keep a galaxy only if every SN lies farther away than the criteria
dix = np.append(dix, np.all(dS > criteria_distance, axis=1))
if N_gx_rem > 0:
# Finally, the remaining less than <N_rows> coordinates
beg = (n + 1) * N_rows
end = beg + N_gx_rem
# Create matrices
RA_gx = cu_gra[beg:end, None] + N_sn_ZEROS
DEC_gx = cu_gdec[beg:end, None] + N_sn_ZEROS
RA_sn = np.zeros(N_gx_rem)[:, None] + snlist.Ra.values[None, :]
DEC_sn = np.zeros(N_gx_rem)[:, None] + snlist.Dec.values[None, :]
# print 'Calculating differences for each coordinate type ...'
# Differences
dRA = RA_gx - RA_sn
dDEC = DEC_gx - DEC_sn
# print 'Calculating the distances between every possible set of coordinates ...'
# Distances from each coordinate to each supernova
dS = np.sqrt(dRA ** 2 + dDEC ** 2)
# print 'Creating boolean vector for each coordinate ...'
# Append the booleans to my master index file
dix = np.append(dix, np.all(dS > criteria_distance, axis=1))
# Check
print 'N_gx =', N_gx
print 'dix.size =', dix.size
print 'Reducing data for the next step ...'
dcu_galaxies = cu_galaxies.iloc[dix]
# Step 3:
# -------
print 'Step 3 : Exclude coordinates that are too close to other non-events ...'
"""
Rationale
---------
When I discovered that I had not checked whether the galaxy coordinates
themselves were far enough apart to stay out of each other's cutouts, I
realised that I had never made sure that I could order them by observation
date and then keep only the first-observed object (it would make no
difference for my algorithm to begin with), which would have let me argue
for keeping the first one.
As it is (2014-02-26), I do not have any means to use observation
dates, since I do not have the luxury of starting over and beginning with
Step 0. More importantly, I do not have time to spend on finding out
how to obtain observation dates from the SDSS database.
Since this is basically a proof of concept, I just need enough
coordinates that, as far as the merged snlists are concerned, have no
known transient events classified as SNe happening in them. There
does not seem to be any reason why I should not just choose arbitrarily
between two coordinates whose cutouts (101x101) will overlap.
Algorithm
---------
1. I go through the list from the top.
2. For each coordinate I calculate the distance to all other coordinates.
3. I get an array of the same length, containing the distances.
4. Entries with coordinates too close to be outside the given coordinate's
cutout `view` will be removed from the list.
5. The now potentially reduced list is used in the next step. Since we
start from the top, the previous coordinate entries will all be there in
the reduced list.
6. Increase the entry index and repeat from step 2 until the final entry
is reached (no possibly-too-close coordinates remain).
"""
i = 0
ddcu_galaxies = dcu_galaxies.copy()
while i < ddcu_galaxies.shape[0]:
if i and not i % 1000:
print 'i = {: >5d}'.format(i)
templist = ddcu_galaxies.copy()
entry = templist[i:i + 1]
dRa = entry.Ra.values[0] - templist.Ra.values
dDec = entry.Dec.values[0] - templist.Dec.values
dS = np.sqrt(dRa ** 2 + dDec ** 2)
dix = (dS > criteria_distance)
dix[i] = True
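# always keep the current entry itself (its distance to itself is zero)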
ddcu_galaxies = templist.iloc[dix].copy()
del templist
# Yikes :O !!! This turned out to be important :D
i += 1
# print 'Final size of gxlist: {:,.0f}'.format(ddcu_galaxies.shape[0])
# Step 4:
# -------
print 'Step 4 : Exclude coordinates that are covered by too few fields ...'
"""
This was determined from the coordinates that were in the first tlist
made before this step, where I discovered that some of the chosen coordinates
had as few as one field covering them. With fewer covering fields, the chance
of getting a number of cutouts in a sequence that matches the average number of
cutouts for the coordinates in snlist is smaller. There could be a bias here.
"""
# Lowest frame count 69.0
# Highest frame count 162.0
MIN_NUMBER_OF_FIELDS = 69
MAX_NUMBER_OF_FIELDS = 162
# Check if enough covering fields
# If not enough, exclude the coordinate.
n_fields = np.array([])
for i in range(ddcu_galaxies.shape[0]):
print '{: >5d}'.format(i),
Ra, Dec = ddcu_galaxies.iloc[i]
N = get_covering_fields(np.array([Ra, Dec])[None, :]).shape[0]
n_fields = np.append(n_fields, N)
print '- Fields: {: >3d}'.format(N)
fix = (
(n_fields >= MIN_NUMBER_OF_FIELDS)
&
(n_fields <= MAX_NUMBER_OF_FIELDS)
)
fddcu_galaxies = ddcu_galaxies.iloc[fix]
print 'Final size of gxlist: {:,.0f}'.format(fddcu_galaxies.shape[0])
# Finalise
# --------
print 'Step finalise : save the resulting list to disk.'
fddcu_galaxies.to_csv(env.files.get('gxlist'), index=False, header=True)
def plot_gxlist():
"""
Generate figure showing the galaxy coordinates within the stripe
plotted over the regions covered by the fields that are available.
"""
pass
###############################################################################
def build_tlist():
"""
Build the event/non-event data set and save it as a file.
"""
import numpy as np
import pandas as pd
from ..parse import ra2deg, dec2deg
if 1:
snlist = pd.read_csv(env.files.get('snlist'), sep=';')
else:
snid_sortable = lambda SDSS_id: 'SN{:0>5d}'.format(int(SDSS_id[2:]))
s2valornan = lambda s: s or np.nan
conv = dict(SDSS_id=snid_sortable, Ra=ra2deg, Dec=dec2deg,
redshift=s2valornan, Peak_MJD=s2valornan)
lkw = dict(sep=';', converters=conv)
snlist = pd.read_csv(env.files.get('snlist_1030'), **lkw)
gxlist = pd.read_csv(env.files.get('gxlist'), sep=',')
# print gxlist.info()
# print gxlist.head(10)
print snlist.info()
print snlist.head(10)
# How many needed in total
N_sne = snlist.shape[0]
N_gx = gxlist.shape[0]
N_needed = np.round(N_sne, decimals=-2) * 2
N_gx_c = N_needed - N_sne
gx_ix = np.unique(np.random.randint(0, N_gx, size=N_gx_c))
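# np.random.randint can draw duplicate indices and np.unique collapses them,
# so keep topping up the sample until exactly N_gx_c unique galaxy rows are
# selected, giving a roughly balanced event/non-event data set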
while gx_ix.size != N_gx_c:
N_cur = gx_ix.size
N_left = N_gx_c - N_cur
gx_ix = np.unique(
np.append(
gx_ix,
np.random.randint(0, N_gx, size=N_left)
)
)
gx_chosen = gxlist.iloc[gx_ix]
# print gx_chosen.info()
# print gx_chosen.head(10)
# raise SystemExit
# Build data set
ra = np.append(snlist.Ra.values, gx_chosen.Ra.values)
dec = np.append(snlist.Dec.values, gx_chosen.Dec.values)
is_sn = np.append(np.ones(N_sne), np.zeros(N_gx_c)).astype(bool)
# Collect and shuffle the lines, so that I only need to split
# the data set N-fold, when using the data.
dataset = np.array([ra,
dec,
is_sn]).T
# Do in-place shuffle
"""
This is an in-place operation on a view of the original array.
It does not create a new, shuffled array, so there's no need
to transpose the result.
REF: https://stackoverflow.com/questions/20546419/shuffle-columns-of-an-array-with-numpy
"""
np.random.shuffle(dataset)
if 0:
coords = np.array([])
# for i in range(dataset.shape[0]):
# coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
# dataset[i, 0], dataset[i, 1])
# )
for i in range(snlist.shape[0]):
coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
snlist.Ra.values[i], snlist.Dec.values[i])
)
ucoords, indices = np.unique(coords, return_inverse=True)
print ucoords.size, np.unique(snlist.SDSS_id.values).size, np.unique(snlist.Peak_MJD.values).size
raise SystemExit
# tlist = pd.DataFrame(data=dict(Ra=ra, Dec=dec, is_sn=is_sn))
tlist = pd.DataFrame(
data=dataset,
columns=['Ra', 'Dec', 'is_sn']
)
print tlist.info()
print tlist.head(10)
tlist.to_csv(env.files.get('tlist'), index=False, header=True)
###############################################################################
def build_tlist_sample(N=5):
"""
Build *SAMPLE* event data set and save it as a file.
"""
import numpy as np
import pandas as pd
snlist = pd.read_csv(env.files.get('snlist'), sep=';')
ra = snlist.Ra.values[:N]
dec = snlist.Dec.values[:N]
is_sn = np.ones(N).astype(bool)
dataset = np.array([ra,
dec,
is_sn]).T
tlist = pd.DataFrame(
data=dataset,
columns=['Ra', 'Dec', 'is_sn']
)
print tlist.info()
print tlist.head(N)
tlist.to_csv(env.files.get('tlist'), index=False, header=True)
###############################################################################
def check_snlist():
"""
Check for duplicates in snlist_1030
"""
import numpy as np
import pandas as pd
from ..parse import ra2deg, dec2deg
# Converters
snid_sortable = lambda SDSS_id: 'SN{:0>5d}'.format(int(SDSS_id[2:]))
s2valornan = lambda s: s or np.nan
if 0:
ifname = env.files.get('snlist_1030')
conv = dict(SDSS_id=snid_sortable, Ra=ra2deg, Dec=dec2deg,
redshift=s2valornan, Peak_MJD=s2valornan)
else:
ifname = env.files.get('snlist')
conv = dict(SDSS_id=snid_sortable, redshift=s2valornan,
Peak_MJD=s2valornan)
lkw = dict(sep=';', converters=conv)
snlist = pd.read_csv(ifname, **lkw)
# Check for duplicate coordinate pairs
coords = np.array([])
for i in range(snlist.shape[0]):
coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
snlist.Ra.values[i], snlist.Dec.values[i])
)
ucoords, indices = np.unique(coords, return_inverse=True)
print 'Number of list entries: {: >4d}'.format(snlist.shape[0])
print 'Number of unique entries: {: >4d}'.format(ucoords.size)
# print 'Number of unique entry IDs: {: >4d}'.format(np.unique(snlist.SDSS_id.values).size)
duplicates = []
for ix in np.unique(indices):
if (indices == ix).sum() > 1:
duplicates.append(ix)
if duplicates:
coord_indices = []
for ix in duplicates:
print ''
for i, uc in enumerate(ucoords[indices[indices == ix]]):
if i == 0:
coord_indices.append((coords == uc).nonzero()[0])
print uc
print '\nIndices of the original list:', duplicates
print coord_indices
print ''
print 'Entries from snlist:'
for cices in coord_indices:
print
print snlist.iloc[cices]
else:
print 'No duplicates found ...'
###############################################################################
def check_tlist():
"""
For each entry in `tlist.csv`, find out how many fields cover it.
"""
from .cutouts import get_covering_fields
import numpy as np
import pandas as pd
ifname = env.files.get('tlist')
tlist = pd.read_csv(ifname)
sn_fields = []
gx_fields = []
for i in range(tlist.shape[0]):
print '{: >4d} -'.format(i),
Ra, Dec, is_sn = tlist.iloc[i]
n_fields = get_covering_fields(np.array([Ra, Dec])[None, :]).shape[0]
if is_sn:
sn_fields.append(n_fields)
print 'SN',
else:
gx_fields.append(n_fields)
print 'GX',
print '- Fields: {: >3d}'.format(n_fields)
for data, name in zip([sn_fields, gx_fields], ['SN', 'GX']):
ofname = os.path.join(
env.paths.get('data'),
'nfieldrecords_{}.csv'.format(name)
)
with open(ofname, 'w+') as fsock:
fsock.write('\n'.join(np.array(data).astype(str)))
if 0:
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
from mplconf import mplrc
from mplconf import rmath
mplrc('publish_digital')
fig, ax = plt.subplots(figsize=(12.5, 4))
ax.hist(sn_fields, bins=100)
ax.hist(gx_fields, bins=100)
ax.set_xlabel(rmath('Number of fields for a coordinate'))
ax.set_ylabel(rmath('Counts'))
fig.tight_layout()
ofname_fig = os.path.join(
env.paths.get('data'),
'tlist_nfieldrecords.pdf'
)
plt.savefig(ofname_fig)
###############################################################################
def get_fields(skip_if_exists=True):
"""For each coordinate in the SNe file, get the frames
from the Field table that cover that coordinate.
Saves each result in a separate file named <SDSS_ID>.csv.
"""
# Clean up local cas directory
# field_clean_local_dir()
# Do it manually from the main program instead
# This way, if the program is interrupted, it does not need to begin all
# over, since the single-field-getter skips requests that have already been made.
# with open('template_fields_that_cover_SN.sql', 'r') as fsock:
# sql_fields_fstr = fsock.read()
sql_fields_fstr = """\
SELECT
fieldID,
-- skyVersion,
run, rerun, camcol, field,
-- nObjects,
-- numStars_r,
-- nCR_r,
-- nBrightObj_r,
-- nFaintObj_r,
quality, -- Quality of field in terms of acceptance
mjd_r, -- Julian Date when row 0 was read
-- Do I need Astrometric transformation constants?
-- Do I need Airmass measurements?
raMin, raMax, decMin, decMax
FROM
-- Stripe82..Field
Field
WHERE
raMin < {obj_ra} AND {obj_ra} < raMax
AND
decMin < {obj_dec} AND {obj_dec} < decMax
AND
(raMax - raMin) > {dra_min}
AND
(raMax - raMin) < {dra_max}
ORDER BY
run ASC,
rerun ASC,
camcol ASC,
field ASC,
raMin ASC,
decMin ASC
"""
opath = os.path.join(_path_cas, 'fields')
if not os.path.exists(opath):
os.makedirs(opath)
df = load_SNe_candidate_list()
SNe_len = df.count().max()
print 'Beginning search through all confirmed SNe ...'
time_total_beg = time.time()
for i, (ra, dec, SDSS_id) in enumerate(zip(df.Ra, df.Dec, df.SDSS_id)):
sql_fields = sql_fields_fstr.format(obj_ra=ra, obj_dec=dec, dra_min=.1, dra_max=1.)
# get_field_result(sql_fields, SDSS_id)
# Define output structure
ofname = os.path.join(opath, '{}.csv'.format(SDSS_id))
# Don't bother requesting anything if the file already exists
if os.path.isfile(ofname) and skip_if_exists:
continue  # skip this SN; `return` would abort the whole download loop
# Update progress output
s = 'Downloading: SN {: >4d} out of {: >4d}, {} ...\r'.format(
(i + 1), SNe_len, format_HHMMSS_diff(time_total_beg, time.time())
)
sys.stdout.write(s)
sys.stdout.flush()
# Request the data
fields = query(sql_fields)
if 'error' in fields.lower():
sys.exit('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
# And save it
with open(ofname, 'w+') as fsock:
fsock.write(fields)
sys.stdout.write('\n')
sys.stdout.flush()
time_total_end = time.time()
time_total = format_HHMMSS_diff(time_total_beg, time_total_end)
print 'Done downloading field catalogue ... in {}'.format(time_total)
# print 'Downloaded field catalogues for {} SNe'.format(i+1)
def get_field_result(sql_fields, SDSS_id):
"""Get query results from a *single* request for fields."""
# Define output structure
opath = os.path.join(_path_cas, 'fields')
ofname = os.path.join(opath, '{}.csv'.format(SDSS_id))
# Don't bother requesting anything if the file already exists
if os.path.isfile(ofname):
return
# If the file does not exists, make sure that the path does
if not os.path.exists(opath):
os.makedirs(opath)
# Request the data
fields = query(sql_fields)
if 'error' in fields.lower():
sys.exit('ERROR: {}: CAS says something is wrong ...'.format(
sys._getframe().f_code.co_name)
)
# And save it
with open(ofname, 'w+') as fsock:
fsock.write(fields)
def create_unique_field_list():
"""
Creates a file containing all results from *get_fields()*
and saves it in the CAS root directory.
Looks through folder `fields` in the CAS root directory and loads
all .csv files one at a time and adds the lines in each file to
a list. This list is then sorted and only unique entries are kept
before the list is saved in the CAS root directory.
"""
ipath = os.path.join(_path_cas, 'fields')
iglob = os.path.join(ipath, '*.csv')
ofname = env.files.get('fields')
tfname = os.path.join(_path_cas, 'fields.tmp')
# Clean up first, since the file is only appended to in the following
if os.path.isfile(ofname):
os.remove(ofname)
commands = [
# Build one big file with all the results
'cat {iglob} >> {t}'.format(iglob=iglob, t=tfname),
# Sort and remove duplicates
'cat {t} | sort | uniq > {o}'.format(t=tfname, o=ofname),
# Remove the temporary file
'rm {t}'.format(t=tfname),
]
for cmd in commands:
print cmd
os.system(cmd)
# Move last line (with the CSV headers) to the top
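# (after `sort`, the alphabetic header line ends up below the numeric field
# rows, which is why it is found at the bottom of the file)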
with open(ofname, 'r') as fsock:
lines = fsock.readlines()
lines = [lines[-1]] + lines[:-1]
with open(ofname, 'w') as fsock:
fsock.write(''.join(lines))
def filter_invalid_from_unique_field_list(dra_min=.1, dra_max=1.):
"""
Remove invalid entries from the unique field
list created by *create_unique_field_list()*.
Parameters
----------
dra_min : float
the minimum allowable angle separating the RA start and
end coordinate for a given field in the unique field list.
dra_max : float
the maximum allowable angle separating the RA start and
end coordinate for a given field in the unique field list.
Side effects
------------
Creates a backup of the original field list.
Saves the results back into the original destination.
"""
import pandas as pd
import shutil
fname = env.files.get('fields')
# fn_valid = os.path.splitext(fname)[0] + '_valid.csv'
fn_invalid = os.path.splitext(fname)[0] + '_invalid.csv'
shutil.copyfile(fname, '{}.orig'.format(fname))
df = pd.read_csv(fname, sep=',')
"""
2014-07-28
----------
There is an error in this procedure, but the result is the intended one:
to remove fields for which the physical extent---as given by the
coordinates raMax, raMin, decMax, decMin---gives too small or even
negative side lengths. This problem is only observed in the RA
coordinates.
There are five observed cases for where the RA coordinate extents land.
Case 1: raMax \in [ 0; 60] and raMin \in [ 0; 60]
Case 2: raMax \in [ 0; 60] and raMin \in [300; 360]
Case 3: raMax \in [300; 360] and raMin \in [300; 360]
Case 4: raMax \in [ 0; 60] and raMin \in [300; 360]
Case 5: raMax > 360 and raMin \in [ 0; 60]
Case 4 should not occur, since this means that the field is obtained
end-first and beginning-last. These fields are considered invalid.
Case 5 is OK, if subtracting 360 deg from raMax, the value is larger
than raMin. Otherwise, the coordinate difference produces a negative
side length again.
It turns out that this is the case, so these fields are also invalid.
The procedure below removes all fields for which the coordinate difference
raMax - raMin < dra_min = .1 (default). Since this also removes all Case-4
and Case-5 records above, the result is what is intended; but with wrong
assumptions.
Since this is the way that my data were processed, I leave it as is,
in case I need to reproduce the field list again.
"""
dra = df.raMax - df.raMin
dra_too_small_ix = (dra < dra_min)
dra_too_large_ix = (dra > dra_max)
df_invalid = df.loc[dra_too_small_ix | dra_too_large_ix]
df_valid = df.loc[(~dra_too_small_ix) & (~dra_too_large_ix)]
df_valid.to_csv(fname, index=False, header=True)
df_invalid.to_csv(fn_invalid, index=False, header=True)
def count_field_records():
"""
Count the number of field records obtained for
each SN, and save those numbers for later plotting.
"""
import glob
import pandas as pd
iglob = os.path.join(_path_cas, 'fields', '*.csv')
filenames = sorted(glob.glob(iglob))
df_fields = pd.read_csv(env.files.get('fields'), sep=',')
counts = []
beg = time.time()
for i, ifname in enumerate(filenames):
df_results = pd.read_csv(ifname, sep=',')
count = 0
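# count how many of this SN's covering fields survive in the unique,
# filtered field list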
for j in range(df_results.shape[0]):
count += (df_fields['fieldID'] == df_results.iloc[j]['fieldID']).sum()
step = time.time()
dt_str = format_HHMMSS_diff(beg, step)
print '{: >4d}, {: >3d}, {}'.format(i, count, dt_str)
counts.append(str(count))
# print len(filenames), len(counts)
with open(env.files.get('nrecords'), 'w+') as fsock:
fsock.write('\n'.join(counts))
def count_field_records_by_quality():
"""
Count the number of field records obtained for each SN,
broken down by field quality, and save those numbers for later plotting.
"""
import glob
import pandas as pd
iglob = os.path.join(_path_cas, 'fields', '*.csv')
filenames = sorted(glob.glob(iglob))
df_fields = pd.read_csv(env.files.get('fields'), sep=',')
print 'Number of fields:', df_fields.shape[0]
# FieldQuality Data values
# name value description
# BAD 1 Not acceptable for the survey
# ACCEPTABLE 2 Barely acceptable for the survey
# GOOD 3 Fully acceptable -- no desire for better data
# MISSING 4 No objects in the field, because data is missing.
# We accept the field into the survey as a HOLE
# HOLE 5 Data in this field is not acceptable, but we will
# put the field into the survey as a HOLE, meaning
# none of the objects in the field are part of the
# survey.
# See: http://cas.sdss.org/stripe82/en/help/browser/enum.asp?n=FieldQuality
qualities = range(1, 4)
qices = [(df_fields.quality.values == q) for q in qualities]
fdict = {q: df_fields.iloc[qix] for (q, qix) in zip(qualities, qices)}
cdict = {q: [] for q in qualities}
for i, qix in enumerate(qices):
print 'Number of fields with quality {:d}: {: >3d}'.format(
i + 1, qix.sum())
print 'For qualities 4 and 5, none were present in my filtered dataset'
beg = time.time()
for i, ifname in enumerate(filenames):
df_results = pd.read_csv(ifname, sep=',')
counts = [0] * len(qualities)
for j in range(df_results.shape[0]):
for k, q in enumerate(qualities):
ices = (fdict[q]['fieldID'] == df_results.iloc[j]['fieldID'])
counts[k] += ices.sum()
step = time.time()
Dt = format_HHMMSS_diff(beg, step)
print '{: >4d}, {}: {: >3d}, {: >3d}, {: >3d}'.format(i, Dt, *counts)
for k, q in enumerate(qualities):
cdict[q].append(counts[k])
list_of_lists = [cdict[q] for q in qualities]
with open(env.files.get('nrecords_q'), 'w+') as fsock:
for row in zip(*list_of_lists):
fsock.write('{},{},{}\n'.format(*row))
def query(sql_raw):
"""Sends SQL query to the CAS and returns the raw text of the response."""
# form_data_receive_only_url = 'http://cas.sdss.org/astro/en/tools/search/x_sql.asp'
form_url = 'http://cas.sdss.org/stripe82/en/tools/search/sql.asp'
return_format = 'csv'
sql_filtered = ''
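# strip '--' SQL line comments before submitting; only the code part of each
# line is kept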
for line in sql_raw.split('\n'):
sql_filtered += line.split('--')[0] + ' ' + os.linesep
# Browser
br = mechanize.Browser()
# User-Agent
br.addheaders = [
(
'User-agent',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0'
)
]
br.open(form_url)
br.select_form(nr=0)
# Fill in the SQL query and the requested return format
br.form['cmd'] = sql_filtered
br.form['format'] = [return_format]
# Search and return the content of the resulting CSV-file
return br.submit().read()
def field_clean_local_dir():
"""
Todo
----
Make it possible to supply paths to folders that should have stuff removed.
"""
opath = os.path.join(_path_cas, 'fields')
if not os.path.exists(opath):
return
oglob = os.path.join(opath, '*')
cmd = 'rm {}'.format(oglob)
print cmd
if not oglob == '/':
os.system(cmd)
else:
print 'Clean-up command not executed ...'
|
bsd-3-clause
|
boknilev/nmt-repr-analysis
|
utils/analysis-dpr.py
|
1
|
2896
|
import itertools
import pdb
import argparse
import pandas as pd
PRONOUNS = set(("he", "they", "she", "it", "him", "her"))
def get_data(args):
lbls_file = open(args.gold)
src_file = open(args.src)
pred_file = open(args.pred)
idx = {}
for i, trip in enumerate(itertools.izip(lbls_file, pred_file, src_file)):
idx[i] = trip[0], trip[1], trip[2]
return idx
def get_sents_by_word(data, word):
ids = []
for key,val in data.items():
if word in set(val[2].split()):
ids.append(key)
return ids
def get_pair_by_pronoun(data):
pronoun_to_lbl = {}
total_pairs = 0
missed_pronouns = set()
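# pronoun_to_lbl maps each pronoun to 'correct'/'incorrect' buckets (prediction
# vs. gold label), each split by the gold label ('entailed'/'not-entailed') and
# holding the raw source lines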
for pronoun in PRONOUNS:
pronoun_to_lbl[pronoun] = {'correct': {'entailed': [], 'not-entailed': []}, 'incorrect': {'entailed': [], 'not-entailed': []} }
for idx, trip in data.items():
src = trip[2].split("|||")
context = src[0]
hyp = src[1]
if pronoun in context.lower() and pronoun not in hyp.lower():
total_pairs += 1
gold = trip[0].strip()
pred = trip[1].strip()
if gold == pred:
pronoun_to_lbl[pronoun]['correct'][gold].append(trip[2])
else:
# 'incorrect' is bucketed by the gold label, not the predicted label
pronoun_to_lbl[pronoun]['incorrect'][gold].append(trip[2])
else:
set_diff = set(context.split()).difference(set(hyp.split()))
assert len(set_diff) == 1, "more than one word difference"
word = list(set_diff)[0]
if word not in PRONOUNS:
missed_pronouns.add(word)
return pronoun_to_lbl
def convert_to_df(pronoun_to_lbl):
data = pd.DataFrame(columns=["Pronoun", "label", "incorrect", "correct"])
for pronoun in pronoun_to_lbl:
for label in pronoun_to_lbl[pronoun]['correct']:
correct_count = len(pronoun_to_lbl[pronoun]['correct'][label])
incorrect_count = len(pronoun_to_lbl[pronoun]['incorrect'][label])
if correct_count == 0 and incorrect_count == 0:
continue
data = data.append({"Pronoun":pronoun, "label":label, "incorrect": incorrect_count, "correct": correct_count}, ignore_index=True)
return data
def main(args):
data = get_data(args) #args.src, args.gold, args.pred)
pronoun_to_lbl = get_pair_by_pronoun(data)
df = convert_to_df(pronoun_to_lbl)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analysis on dpr.')
parser.add_argument('--src', help="path to source file")
parser.add_argument('--gold', help="path to gold labels file")
parser.add_argument('--pred', help="path to pred labels file")
args = parser.parse_args()
main(args)
#python analysis-dpr.py --src /export/ssd/apoliak/nmt-repr-anaysis-sem/data/rte/cl_dpr_val_source_file --gold /export/ssd/apoliak/nmt-repr-anaysis-sem/data/rte/cl_dpr_val_lbl_file --pred /export/ssd/apoliak/nmt-repr-anaysis-sem/output/dpr-dpr/linear_classifier/en-de/pred_file.epoch2
|
mit
|
JPFrancoia/scikit-learn
|
benchmarks/bench_plot_svd.py
|
325
|
2899
|
"""Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
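# Accuracy sanity check (added sketch, illustrative only, not part of the
# original benchmark): on a low-rank matrix the leading singular values from
# randomized_svd should closely track the exact SVD, e.g.:
#
#     X = make_low_rank_matrix(200, 200, effective_rank=50, tail_strength=0.2)
#     _, s_exact, _ = svd(X, full_matrices=False)
#     _, s_approx, _ = randomized_svd(X, 50, n_iter=5)
#     print(abs(s_exact[:50] - s_approx).max())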
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
|
bsd-3-clause
|
uberscientist/activetick_http
|
setup.py
|
1
|
1311
|
from activetick_http import __version__
from os import path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open(path.join(path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
setup(
name='activetick_http',
version=__version__,
description='Pandas wrapper for ActiveTick HTTP Proxy',
long_description=long_description,
url='https://github.com/uberscientist/activetick_http',
author='Christopher Toledo',
author_email='[email protected]',
keywords=['activetick', 'finance', 'quant', 'pandas'],
license='MIT',
packages=['activetick_http'],
tests_require=['pytest',
'tabulate',
'redis'
],
package_dir={'activetick_http': 'activetick_http'},
install_requires=[
'pandas',
'requests',
'numpy',
'redis'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
]
)
|
mit
|
DaveBackus/Data_Bootcamp
|
Code/Lab/IMF_historical_debt.py
|
1
|
2700
|
"""
IMF historical debt data
https://www.imf.org/External/pubs/cat/longres.aspx?sk=24332.0
rows are countries, columns are dates (1692-2012)
Prepared for Data Bootcamp course at NYU
* https://github.com/DaveBackus/Data_Bootcamp
* https://github.com/DaveBackus/Data_Bootcamp/Code/Python
Written by Hersh Iyer and Itamar Snir, November 2015
Created with Python 3.5
"""
import pandas as pd
import matplotlib.pyplot as plt
# data input
excelFilePath = '../Temp/Debt Database Fall 2013 Vintage.xlsx'
debt = pd.read_excel(excelFilePath, sheetname=1, na_values=['…', '….', ''])
debt = debt.drop([debt.columns[1], debt.columns[2]], axis=1)
countries = ['Greece', 'United Kingdom ', 'United States']
some = debt[debt['country'].isin(countries)]
some = some.set_index('country').T
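# (added note) After set_index('country').T the frame is indexed by year with
# one column per country, so each country's debt series can be plotted directly.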
ax = some[countries[1]].plot(color='red')
some[countries[0]].dropna().plot(color='blue')
some[countries[2]].dropna().plot(color='green')
ax.set_title('Ratio of government debt to GDP', fontsize=14, loc='left')
ax.set_ylabel('Percent')
ax.legend(['Greece', 'United Kingdom', 'United States'], loc='upper left', fontsize=10)
#%%
ax = some.dropna().plot()
#%%
# OLD VERSION BELOW
# data input
excelFilePath = '../Temp/Debt Database Fall 2013 Vintage.xlsx'
df = pd.read_excel(excelFilePath, sheetname=1, na_values=['…', '….', '']) #, index_col=-1,
#encoding='utf-8')
#%%
"""
plots
"""
### UK debt to GDP since 1800
#construct the years for the x-axis values
years = [year for year in range(1800,2013)]
#get a list of the debt to GDP for the y-axis values
# next line fails, not sure why
dbt_UK = df[df.country=='United Kingdom '][years] #note the extra spaces in 'United Kingdom '
dbt_uk_list = dbt_UK.values.tolist()[0] #note the conversion to a list (required to convert data to be 1 dimensional)
#%%
#plot the data
plt.plot(years,dbt_uk_list) #use graph in default color
plt.ylabel ("debt to GDP")
plt.xlim((1800, 2012)) #for aesthetics, make sure x-axis shows only the relevant years
plt.title("United Kingdom Debt to GDP Between 1800 and 2012")
plt.show()
#%%
### Greece debt to GDP since 1980
#get most recent year in the data (instead of 2013):
max_year = max(df.columns.values[4:].tolist())
#get a list of the years for the x-axis values
years = [year for year in range(1980,max_year+1)]
#get a list of the debt to GDP for the y-axis values
dbt_greece = df[df.country=='Greece'][years]
dbt_greece_list = dbt_greece.values.tolist()[0]
#plot the data
plt.plot(years,dbt_greece_list, color='red') #set graph color
plt.ylabel('Debt to GDP')
plt.title ('Greece Debt to GDP Between 1980 and '+ str(max_year))
plt.show()
|
mit
|
Eric89GXL/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
7
|
1768
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, i.e. it learns from the noise and overfits.
"""
print(__doc__)
import numpy as np
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
from sklearn.tree import DecisionTreeRegressor
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(y[:, 0], y[:, 1], c="k", label="data")
pl.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
pl.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
pl.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
pl.xlim([-6, 6])
pl.ylim([-6, 6])
pl.xlabel("data")
pl.ylabel("target")
pl.title("Multi-output Decision Tree Regression")
pl.legend()
pl.show()
|
bsd-3-clause
|
lin-credible/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
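# Added sketch (not in the original example): as described in the module
# docstring, the predicted label equals the sign of the decision score --
# scores > 0 map to class B (encoded as 1), scores <= 0 to class A (encoded
# as 0). Quick consistency check:
scores_check = bdt.decision_function(X)
print("predict() matches sign of decision score:",
      np.array_equal(bdt.predict(X), (scores_check > 0).astype(int)))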
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
|
bsd-3-clause
|
arnabgho/sklearn-theano
|
examples/plot_overfeat_layer1_filters.py
|
9
|
1724
|
"""
====================================
Visualization of first layer filters
====================================
The first layers of convolutional neural networks often have very "human
interpretable" values, as seen in these example plots. Visually, these filters
are similar to other filters used in computer vision, such as Gabor filters.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn_theano.feature_extraction import fetch_overfeat_weights_and_biases
def make_visual(layer_weights):
max_scale = layer_weights.max(axis=-1).max(axis=-1)[...,
np.newaxis, np.newaxis]
min_scale = layer_weights.min(axis=-1).min(axis=-1)[...,
np.newaxis, np.newaxis]
return (255 * (layer_weights - min_scale) /
(max_scale - min_scale)).astype('uint8')
def make_mosaic(layer_weights):
# Dirty hack (TM)
lw_shape = layer_weights.shape
lw = make_visual(layer_weights).reshape(8, 12, *lw_shape[1:])
lw = lw.transpose(0, 3, 1, 4, 2)
lw = lw.reshape(8 * lw_shape[-1], 12 * lw_shape[-2], lw_shape[1])
return lw
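# (added note) make_mosaic tiles the first-layer filters into an 8 x 12 RGB
# mosaic (assuming 96 filters, which holds for both OverFeat networks); the
# reshape/transpose above interleaves the grid layout with each filter's
# spatial dimensions so a single imshow call can display them all.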
def plot_filters(layer_weights, title=None, show=False):
mosaic = make_mosaic(layer_weights)
plt.imshow(mosaic, interpolation='nearest')
ax = plt.gca()
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if title is not None:
plt.title(title)
if show:
plt.show()
weights, biases = fetch_overfeat_weights_and_biases(large_network=True)
plot_filters(weights[0])
weights, biases = fetch_overfeat_weights_and_biases(large_network=False)
plt.figure()
plot_filters(weights[0])
plt.show()
|
bsd-3-clause
|
yavalvas/yav_com
|
build/matplotlib/examples/pylab_examples/anscombe.py
|
9
|
1656
|
#!/usr/bin/env python
from __future__ import print_function
"""
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
from pylab import *
x = array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = array([8,8,8,8,8,8,8,19,8,8,8])
y4 = array([6.58,5.76,7.71,8.84,8.47,7.04,5.25,12.50,5.56,7.91,6.89])
def fit(x):
return 3+0.5*x
xfit = array( [amin(x), amax(x) ] )
subplot(221)
plot(x,y1,'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2,20,2,14])
setp(gca(), xticklabels=[], yticks=(4,8,12), xticks=(0,10,20))
text(3,12, 'I', fontsize=20)
subplot(222)
plot(x,y2,'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2,20,2,14])
setp(gca(), xticklabels=[], yticks=(4,8,12), yticklabels=[], xticks=(0,10,20))
text(3,12, 'II', fontsize=20)
subplot(223)
plot(x,y3,'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2,20,2,14])
text(3,12, 'III', fontsize=20)
setp(gca(), yticks=(4,8,12), xticks=(0,10,20))
subplot(224)
xfit = array([amin(x4),amax(x4)])
plot(x4,y4,'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2,20,2,14])
setp(gca(), yticklabels=[], yticks=(4,8,12), xticks=(0,10,20))
text(3,12, 'IV', fontsize=20)
#verify the stats
pairs = (x,y1), (x,y2), (x,y3), (x4,y4)
for x,y in pairs:
print ('mean=%1.2f, std=%1.2f, r=%1.2f'%(mean(y), std(y), corrcoef(x,y)[0][1]))
show()
|
mit
|
ycaihua/scikit-learn
|
sklearn/decomposition/base.py
|
313
|
5647
|
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
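# Consistency sketch (added, illustrative only, not part of the library
# module): for a fitted concrete subclass such as sklearn.decomposition.PCA,
# get_precision() should equal the inverse of get_covariance():
#
#     import numpy as np
#     from sklearn.decomposition import PCA
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 5)
#     pca = PCA(n_components=3).fit(X)
#     assert np.allclose(pca.get_precision(),
#                        np.linalg.inv(pca.get_covariance()))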
|
bsd-3-clause
|
RondaStrauch/landlab
|
landlab/ca/examples/turbulent_suspension_with_settling_and_bleaching.py
|
4
|
16830
|
#!/usr/env/python
"""
isotropic_turbulent_suspension_with_settling_and_bleaching.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates the diffusion of suspended particles in a turbulent fluid.
Particles start with an accumulated luminescence signal L = 1, and are bleached
by exposure to light at a rate that depends on distance below the upper surface.
Written by Greg Tucker, July 2015
"""
from __future__ import print_function  # for both Python 2 and 3 compatibility
import time
import matplotlib
from pylab import figure, show, clf
from numpy import where, exp, amin
from landlab import RasterModelGrid, ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_raster_cts import OrientedRasterCTS
class TurbulentSuspensionAndBleachingModel(OrientedRasterCTS):
"""
Examples
---------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
def __init__(self, input_stream):
"""
Reads in parameters and initializes the model.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
# Get a source for input parameters.
params = ModelParameterDictionary(input_stream)
# Read user-defined parameters
nr = params.read_int('model_grid_row__count') # number of rows (CSDMS Standard Name [CSN])
nc = params.read_int('model_grid_column__count') # number of cols (CSN)
self.plot_interval = params.read_float('plot_interval') # interval for plotting output, s
self.run_duration = params.read_float('model__run_time') # duration of run, sec (CSN)
self.report_interval = params.read_float('model__report_interval') # report interval, in real-time seconds
self.bleach_T0 = params.read_float('surface_bleaching_time_scale') # time scale for bleaching at fluid surface, s
self.zstar = params.read_float('light_attenuation_length') # length scale for light attenuation in fluid, CELLS
# Derived parameters
self.fluid_surface_height = nr-0.5
# Calculate when we next want to report progress.
self.next_report = time.time() + self.report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = self.setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.4*nr)[0]
node_state_grid[bottom_rows] = 1
# Create a data array for bleaching.
# Here, osl=optically stimulated luminescence, normalized to the original
# signal (hence, initially all unity). Over time this signal may get
# bleached out due to exposure to light.
self.osl = mg.add_zeros('node', 'osl')
self.osl[bottom_rows] = 1.0
self.osl_display = mg.add_zeros('node', 'osl_display')
self.osl_display[bottom_rows] = 1.0
# We'll need an array to track the last time any given node was
# updated, so we can figure out the duration of light exposure between
# update events
self.last_update_time = mg.add_zeros('node','last_update_time')
# Call the base class (RasterCTS) init method
super(TurbulentSuspensionAndBleachingModel, \
self).__init__(mg, ns_dict, xn_list, node_state_grid, prop_data=self.osl)
# Set up plotting (if plotting desired)
if self.plot_interval <= self.run_duration:
self.initialize_plotting()
def initialize_plotting(self):
"""
Creates a CA plotter object, sets its colormap, and plots the initial
model state.
"""
# Set up some plotting information
grain = '#5F594D'
bleached_grain = '#CC0000'
fluid = '#D0E4F2'
clist = [fluid,bleached_grain,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
self.ca_plotter = CAPlotter(self, cmap=my_cmap)
# Plot the initial grid
self.ca_plotter.update_plot()
# Make a colormap for use in showing the bleaching of each grain
clist = [(0.0, (1.0, 1.0, 1.0)), (0.49, (0.8, 0.8, 0.8)), (1.0, (0.0, 0.0, 0.0))]
self.cmap_for_osl = matplotlib.colors.LinearSegmentedColormap.from_list('osl_cmap', clist)
def setup_transition_list(self):
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 10.0
2 (1-0) 1 (0-1) right motion 10.0
3 (1-1) (none) - -
4 (0-0) (none) - -
5 (0-1) 2 (1-0) down motion 10.55
6 (1-0) 1 (0-1) up motion 9.45
7 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append four transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left cell, right cell, orientation [0=horizontal])
# - Tuple representing new pair state
# (bottom cell, top cell, orientation [1=vertical])
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
# - Flag indicating that the transition involves an exchange of properties
# - Function to be called after each transition, to update a property
# (in this case, to simulate bleaching of the luminescence signal)
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )
xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )
return xn_list
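        # (added note) The down rate (10.55) exceeds the up rate (9.45) while
        # the two horizontal rates are equal (10.0): the asymmetry encodes
        # particle settling superimposed on otherwise isotropic turbulent
        # mixing.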
def bleach_grain(self, node, dt):
"""
Updates the luminescence signal at node.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.bleach_grain(10, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
"""
depth = self.fluid_surface_height - self.grid.node_y[node]
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid[node]] *= exp( -dt/T_bleach )
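        # (added note) The two lines above apply exponential bleaching,
        # L <- L * exp(-dt / T_bleach), with a bleaching time scale that grows
        # exponentially with depth below the fluid surface:
        # T_bleach = bleach_T0 * exp(depth / zstar).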
def update_bleaching(self, ca_unused, node1, node2, time_now):
"""
Updates the luminescence signal at a pair of nodes that have just
undergone a transition, if either or both nodes is a grain.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.update_bleaching(tsbm, 10, 13, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
>>> tsbm.prop_data[tsbm.propid[13]]
0.0
"""
if self.node_state[node1]==1:
dt = time_now - self.last_update_time[self.propid[node1]]
self.bleach_grain(node1, dt)
self.last_update_time[self.propid[node1]] = time_now
if self.node_state[node2]==1:
dt = time_now - self.last_update_time[self.propid[node2]]
self.bleach_grain(node2, dt)
self.last_update_time[self.propid[node2]] = time_now
def synchronize_bleaching(self, sync_time):
"""
Brings all nodes up to the same time, sync_time, by applying bleaching
up to this time, and updating last_update_time.
Notes
-----
In a CellLab-CTS model, the "time" is usually different for each node:
some will have only just recently undergone a transition and had their
properties (in this case, OSL bleaching) updated, while others will
have last been updated a long time ago, and some may never have had a
transition. If we want to plot the properties at a consistent time, we
need to bring all node properties (again, in this case, OSL) up to
date. This method does so.
We multiply elapsed time (between last update and "sync time") by
the node state, because we only want to update the solid particles---
because the state of a particle is 1 and fluid 0, this multiplication
masks out the fluid nodes.
We don't call bleach_grain(), because we want to take advantage of
numpy array operations rather than calling a method for each node.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.synchronize_bleaching(1.0)
>>> int(tsbm.osl[10]*100000)
85897
"""
dt = (sync_time - self.last_update_time[self.propid])*self.node_state
assert (amin(dt)>=0.0), 'sync_time must be >= 0 everywhere'
depth = self.fluid_surface_height - self.grid.node_y
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid] *= exp( -dt/T_bleach )
self.last_update_time[self.propid] = sync_time*self.node_state
def go(self):
"""
Runs the model.
"""
# RUN
while self.current_time < self.run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= self.next_report:
print('Current sim time',self.current_time,'(',100*self.current_time/self.run_duration,'%)')
self.next_report = current_real_time + self.report_interval
# Run the model forward in time until the next output step
self.run(self.current_time+self.plot_interval, self.node_state,
plot_each_transition=False)
self.current_time += self.plot_interval
self.synchronize_bleaching(self.current_time)
if self.plot_interval <= self.run_duration:
# Plot the current grid
self.ca_plotter.update_plot()
# Display the OSL content of grains
figure(3)
clf()
self.osl_display[:] = self.osl[self.propid]+self.node_state
imshow_node_grid(self.grid, 'osl_display', limits=(0.0, 2.0),
cmap=self.cmap_for_osl)
show()
figure(1)
def finalize(self):
# FINALIZE
# Plot
self.ca_plotter.finalize()
# If user runs this file, activate the main() function.
if __name__ == "__main__":
# Parse command-line argument, if any
import sys
if len(sys.argv)>1:
input_file_name = sys.argv[1]
else:
input_file_name = 'tsbm_inputs.txt'
# Instantiate the model
ca_model = TurbulentSuspensionAndBleachingModel(input_file_name)
# Run the model
ca_model.go()
# Clean up
ca_model.finalize()
|
mit
|
zorojean/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
206
|
1800
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, i.e. it learns from the noise and overfits.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
denniszollo/MAVProxy
|
MAVProxy/mavproxy.py
|
1
|
38949
|
#!/usr/bin/env python
'''
mavproxy - a MAVLink proxy program
Copyright Andrew Tridgell 2011
Released under the GNU GPL version 3 or later
'''
import sys, os, time, socket, signal
import fnmatch, errno, threading
import serial, Queue, select
import traceback
from MAVProxy.modules.lib import textconsole
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import dumpstacks
# adding all this allows pyinstaller to build a working windows executable
# note that using --hidden-import does not work for these modules
try:
from multiprocessing import freeze_support
from pymavlink import mavwp, mavutil
import matplotlib, HTMLParser
try:
import readline
except ImportError:
import pyreadline as readline
except Exception:
pass
if __name__ == '__main__':
freeze_support()
class MPStatus(object):
'''hold status information about the mavproxy'''
def __init__(self):
self.gps = None
self.msgs = {}
self.msg_count = {}
self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}
self.setup_mode = opts.setup
self.mav_error = 0
self.altitude = 0
self.last_altitude_announce = 0.0
self.last_distance_announce = 0.0
self.exit = False
self.flightmode = 'MAV'
self.last_mode_announce = 0
self.logdir = None
self.last_heartbeat = 0
self.last_message = 0
self.heartbeat_error = False
self.last_apm_msg = None
self.last_apm_msg_time = 0
self.highest_msec = 0
self.have_gps_lock = False
self.lost_gps_lock = False
self.last_gps_lock = 0
self.watch = None
self.last_streamrate1 = -1
self.last_streamrate2 = -1
self.last_seq = 0
self.armed = False
def show(self, f, pattern=None):
'''write status to status.txt'''
if pattern is None:
f.write('Counters: ')
for c in self.counters:
f.write('%s:%s ' % (c, self.counters[c]))
f.write('\n')
f.write('MAV Errors: %u\n' % self.mav_error)
f.write(str(self.gps)+'\n')
for m in sorted(self.msgs.keys()):
if pattern is not None and not fnmatch.fnmatch(str(m).upper(), pattern.upper()):
continue
f.write("%u: %s\n" % (self.msg_count[m], str(self.msgs[m])))
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close()
def say_text(text, priority='important'):
'''text output - default function for say()'''
mpstate.console.writeln(text)
def say(text, priority='important'):
'''text and/or speech output'''
mpstate.functions.say(text, priority)
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd)
class MAVFunctions(object):
'''core functions available in modules'''
def __init__(self):
self.process_stdin = add_input
self.param_set = param_set
self.get_mav_param = get_mav_param
self.say = say_text
# input handler can be overridden by a module
self.input_handler = None
class MPState(object):
'''holds state of mavproxy'''
def __init__(self):
self.console = textconsole.SimpleConsole()
self.map = None
self.map_functions = {}
self.vehicle_type = None
self.vehicle_name = None
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
self.settings = MPSettings(
[ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),
MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,20), increment=1),
MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,20), increment=1),
MPSetting('heartbeat', int, 1, 'Heartbeat rate', range=(0,5), increment=1),
MPSetting('mavfwd', bool, True, 'Allow forwarded control'),
MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),
MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),
MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),
MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),
MPSetting('select_timeout', float, 0.01, 'select timeout'),
MPSetting('altreadout', int, 10, 'Altitude Readout',
range=(0,100), increment=1, tab='Announcements'),
MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),
MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),
MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),
MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),
MPSetting('requireexit', bool, False, 'Require exit command'),
MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),
MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),
MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),
MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),
MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),
MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),
MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),
MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),
MPSetting('source_component', int, 0, 'MAVLink Source component', range=(0,255), increment=1),
MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),
MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),
MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories')
])
self.completions = {
"script" : ["(FILENAME)"],
"set" : ["(SETTING)"],
"status" : ["(VARIABLE)"],
"module" : ["list",
"load (AVAILMODULES)",
"<unload|reload> (LOADEDMODULES)"]
}
self.status = MPStatus()
# master mavlink device
self.mav_master = None
# mavlink outputs
self.mav_outputs = []
# SITL output
self.sitl_output = None
self.mav_param = mavparm.MAVParmDict()
self.modules = []
self.public_modules = {}
self.functions = MAVFunctions()
self.select_extra = {}
self.continue_mode = False
self.aliases = {}
import platform
self.system = platform.system()
def module(self, name):
'''Find a public module (most modules are private)'''
if name in self.public_modules:
return self.public_modules[name]
return None
def master(self):
'''return the currently chosen mavlink master object'''
if len(self.mav_master) == 0:
return None
if self.settings.link > len(self.mav_master):
self.settings.link = 1
# try to use one with no link error
if not self.mav_master[self.settings.link-1].linkerror:
return self.mav_master[self.settings.link-1]
for m in self.mav_master:
if not m.linkerror:
return m
return self.mav_master[self.settings.link-1]
def get_mav_param(param, default=None):
'''return a EEPROM parameter value'''
return mpstate.mav_param.get(param, default)
def param_set(name, value, retries=3):
'''set a parameter'''
name = name.upper()
return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
def cmd_script(args):
'''run a script'''
if len(args) < 1:
print("usage: script <filename>")
return
run_script(args[0])
def cmd_set(args):
'''control mavproxy options'''
mpstate.settings.command(args)
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern)
def cmd_setup(args):
mpstate.status.setup_mode = True
mpstate.rl.set_prompt("")
def cmd_reset(args):
print("Resetting master")
mpstate.master().reset()
def cmd_watch(args):
'''watch a mavlink packet pattern'''
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args[0]
print("Watching %s" % mpstate.status.watch)
def load_module(modname, quiet=False):
'''load a module'''
modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]
for (m,pm) in mpstate.modules:
if m.name == modname:
if not quiet:
print("module %s already loaded" % modname)
return False
for modpath in modpaths:
try:
m = import_package(modpath)
reload(m)
module = m.init(mpstate)
if isinstance(module, mp_module.MPModule):
mpstate.modules.append((module, m))
if not quiet:
print("Loaded module %s" % (modname,))
return True
else:
ex = "%s.init did not return a MPModule instance" % modname
break
except ImportError as msg:
ex = msg
if mpstate.settings.moddebug > 1:
import traceback
print(traceback.format_exc())
print("Failed to load module: %s. Use 'set moddebug 3' in the MAVProxy console to enable traceback" % ex)
return False
def unload_module(modname):
'''unload a module'''
for (m,pm) in mpstate.modules:
if m.name == modname:
if hasattr(m, 'unload'):
m.unload()
mpstate.modules.remove((m,pm))
print("Unloaded module %s" % modname)
return True
print("Unable to find module %s" % modname)
return False
def cmd_module(args):
'''module commands'''
usage = "usage: module <list|load|reload|unload>"
if len(args) < 1:
print(usage)
return
if args[0] == "list":
for (m,pm) in mpstate.modules:
print("%s: %s" % (m.name, m.description))
elif args[0] == "load":
if len(args) < 2:
print("usage: module load <name>")
return
load_module(args[1])
elif args[0] == "reload":
if len(args) < 2:
print("usage: module reload <name>")
return
modname = args[1]
pmodule = None
for (m,pm) in mpstate.modules:
if m.name == modname:
pmodule = pm
if pmodule is None:
print("Module %s not loaded" % modname)
return
if unload_module(modname):
import zipimport
try:
reload(pmodule)
except ImportError:
clear_zipimport_cache()
reload(pmodule)
if load_module(modname, quiet=True):
print("Reloaded module %s" % modname)
elif args[0] == "unload":
if len(args) < 2:
print("usage: module unload <name>")
return
modname = os.path.basename(args[1])
unload_module(modname)
else:
print(usage)
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return
def clear_zipimport_cache():
"""Clear out cached entries from _zip_directory_cache.
See http://www.digi.com/wiki/developer/index.php/Error_messages"""
import sys, zipimport
syspath_backup = list(sys.path)
zipimport._zip_directory_cache.clear()
# load back items onto sys.path
sys.path = syspath_backup
# add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html
sys.path_importer_cache.clear()
# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected
# has info on why this is necessary.
def import_package(name):
"""Given a package name like 'foo.bar.quux', imports the package
and returns the desired module."""
import zipimport
try:
mod = __import__(name)
except ImportError:
clear_zipimport_cache()
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
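# (added note) Usage sketch: import_package('foo.bar.quux') returns the quux
# submodule itself, whereas a bare __import__('foo.bar.quux') returns only the
# top-level package foo; the getattr loop above walks down to the leaf module.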
command_map = {
'script' : (cmd_script, 'run a script of MAVProxy commands'),
'setup' : (cmd_setup, 'go into setup mode'),
'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),
'status' : (cmd_status, 'show status'),
'set' : (cmd_set, 'mavproxy settings'),
'watch' : (cmd_watch, 'watch a MAVLink pattern'),
'module' : (cmd_module, 'module commands'),
'alias' : (cmd_alias, 'command aliases')
}
def process_stdin(line):
'''handle commands from user'''
if line is None:
sys.exit(0)
# allow for modules to override input handling
if mpstate.functions.input_handler is not None:
mpstate.functions.input_handler(line)
return
line = line.strip()
if mpstate.status.setup_mode:
# in setup mode we send strings straight to the master
if line == '.':
mpstate.status.setup_mode = False
mpstate.status.flightmode = "MAV"
mpstate.rl.set_prompt("MAV> ")
return
if line != '+++':
line += '\r'
for c in line:
time.sleep(0.01)
mpstate.master().write(c)
return
if not line:
return
args = line.split()
cmd = args[0]
while cmd in mpstate.aliases:
line = mpstate.aliases[cmd]
args = line.split() + args[1:]
cmd = args[0]
if cmd == 'help':
k = command_map.keys()
k.sort()
for cmd in k:
(fn, help) = command_map[cmd]
print("%-15s : %s" % (cmd, help))
return
if cmd == 'exit' and mpstate.settings.requireexit:
mpstate.status.exit = True
return
if not cmd in command_map:
for (m,pm) in mpstate.modules:
if hasattr(m, 'unknown_command'):
try:
if m.unknown_command(args):
return
except Exception as e:
print("ERROR in command: %s" % str(e))
print("Unknown command '%s'" % line)
return
(fn, help) = command_map[cmd]
try:
fn(args[1:])
except Exception as e:
print("ERROR in command %s: %s" % (args[1:], str(e)))
if mpstate.settings.moddebug > 1:
traceback.print_exc()
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(str(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
sys.stdout.write(str(s))
sys.stdout.flush()
return
if m.first_byte and opts.auto_protocol:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
def process_mavlink(slave):
'''process packets from MAVLink slaves, forwarding to the master'''
try:
buf = slave.recv()
except socket.error:
return
try:
if slave.first_byte and opts.auto_protocol:
slave.auto_mavlink_version(buf)
msgs = slave.mav.parse_buffer(buf)
except mavutil.mavlink.MAVError as e:
mpstate.console.error("Bad MAVLink slave message from %s: %s" % (slave.address, e.message))
return
if msgs is None:
return
if mpstate.settings.mavfwd and not mpstate.status.setup_mode:
for m in msgs:
if mpstate.status.watch is not None:
if fnmatch.fnmatch(m.get_type().upper(), mpstate.status.watch.upper()):
mpstate.console.writeln('> '+ str(m))
mpstate.master().write(m.get_msgbuf())
mpstate.status.counters['Slave'] += 1
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
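# (added note) Roughly equivalent to os.makedirs(dir, exist_ok=True) on
# Python >= 3.2; the explicit recursion above also works on the Python 2
# interpreters this script otherwise targets.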
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue_raw.empty():
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty():
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs:
mpstate.logfile.flush()
mpstate.logfile_raw.flush()
# If state_basedir is NOT set then paths for logs and aircraft
# directories are relative to mavproxy's cwd
def log_paths():
'''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''
if opts.aircraft is not None:
if opts.mission is not None:
print(opts.mission)
dirname = "%s/logs/%s/Mission%s" % (opts.aircraft, time.strftime("%Y-%m-%d"), opts.mission)
else:
dirname = "%s/logs/%s" % (opts.aircraft, time.strftime("%Y-%m-%d"))
# dirname is currently relative. Possibly add state_basedir:
if mpstate.settings.state_basedir is not None:
dirname = os.path.join(mpstate.settings.state_basedir,dirname)
mkdir_p(dirname)
highest = None
for i in range(1, 10000):
fdir = os.path.join(dirname, 'flight%u' % i)
if not os.path.exists(fdir):
break
highest = fdir
if mpstate.continue_mode and highest is not None:
fdir = highest
elif os.path.exists(fdir):
print("Flight logs full")
sys.exit(1)
logname = 'flight.tlog'
logdir = fdir
else:
logname = os.path.basename(opts.logfile)
dir_path = os.path.dirname(opts.logfile)
if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:
dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)
logdir = dir_path
mkdir_p(logdir)
return (logdir,
os.path.join(logdir, logname),
os.path.join(logdir, logname + '.raw'))
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
'''open log files'''
if opts.append_log or opts.continue_mode:
mode = 'a'
else:
mode = 'w'
mpstate.logfile = open(logpath_telem, mode=mode)
mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
print("Log Directory: %s" % mpstate.status.logdir)
print("Telemetry log: %s" % logpath_telem)
# use a separate thread for writing to the logfile to prevent
# delays during disk writes (important as delays can be long if camera
# app is running)
t = threading.Thread(target=log_writer, name='log_writer')
t.daemon = True
t.start()
def set_stream_rates():
'''set mavlink stream rates'''
if (not msg_period.trigger() and
mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
return
mpstate.status.last_streamrate1 = mpstate.settings.streamrate
mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
for master in mpstate.mav_master:
if master.linknum == 0:
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
if rate != -1:
master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
def check_link_status():
'''check status of master links'''
tnow = time.time()
if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:
say("no link")
mpstate.status.heartbeat_error = True
for master in mpstate.mav_master:
if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):
say("link %u down" % (master.linknum+1))
master.linkerror = True
def send_heartbeat(master):
if master.mavlink10():
master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0, 0, 0)
else:
MAV_GROUND = 5
MAV_AUTOPILOT_NONE = 4
master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)
def periodic_tasks():
'''run periodic checks'''
if mpstate.status.setup_mode:
return
if (mpstate.settings.compdebug & 2) != 0:
return
if mpstate.settings.heartbeat != 0:
heartbeat_period.frequency = mpstate.settings.heartbeat
if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
mpstate.status.counters['MasterOut'] += 1
for master in mpstate.mav_master:
send_heartbeat(master)
if heartbeat_check_period.trigger():
check_link_status()
set_stream_rates()
# call optional module idle tasks. These are called at several hundred Hz
for (m,pm) in mpstate.modules:
if hasattr(m, 'idle_task'):
try:
m.idle_task()
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
elif mpstate.settings.moddebug > 1:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
# also see if the module should be unloaded:
if m.needs_unloading:
unload_module(m.name)
def main_loop():
'''main processing loop'''
if not mpstate.status.setup_mode and not opts.nowait:
for master in mpstate.mav_master:
send_heartbeat(master)
if master.linknum == 0:
master.wait_heartbeat()
set_stream_rates()
while True:
if mpstate is None or mpstate.status.exit:
return
while not mpstate.input_queue.empty():
line = mpstate.input_queue.get()
mpstate.input_count += 1
cmds = line.split(';')
if len(cmds) == 1 and cmds[0] == "":
mpstate.empty_input_count += 1
for c in cmds:
process_stdin(c)
for master in mpstate.mav_master:
if master.fd is None:
if master.port.inWaiting() > 0:
process_master(master)
periodic_tasks()
rin = []
for master in mpstate.mav_master:
if master.fd is not None and not master.portdead:
rin.append(master.fd)
for m in mpstate.mav_outputs:
rin.append(m.fd)
if rin == []:
time.sleep(0.0001)
continue
for fd in mpstate.select_extra:
rin.append(fd)
try:
(rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)
except select.error:
continue
if mpstate is None:
return
for fd in rin:
if mpstate is None:
return
for master in mpstate.mav_master:
if fd == master.fd:
process_master(master)
if mpstate is None:
return
continue
for m in mpstate.mav_outputs:
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
# this allow modules to register their own file descriptors
# for the main select loop
if fd in mpstate.select_extra:
try:
# call the registered read function
(fn, args) = mpstate.select_extra[fd]
fn(args)
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
# on an exception, remove it from the select list
mpstate.select_extra.pop(fd)
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = raw_input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line)
def run_script(scriptfile):
'''run a script file'''
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln("Running script %s" % scriptfile)
for line in f:
line = line.strip()
if line == "" or line.startswith('#'):
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln("-> %s" % line)
process_stdin(line)
f.close()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("mavproxy.py [options]")
parser.add_option("--master", dest="master", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink master port and optional baud rate",
default=[])
parser.add_option("--out", dest="output", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink output port and optional baud rate",
default=[])
parser.add_option("--baudrate", dest="baudrate", type='int',
help="default serial baud rate", default=57600)
parser.add_option("--sitl", dest="sitl", default=None, help="SITL output port")
parser.add_option("--streamrate",dest="streamrate", default=4, type='int',
help="MAVLink stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--source-component", dest='SOURCE_COMPONENT', type='int',
default=0, help='MAVLink source component for this GCS')
parser.add_option("--target-system", dest='TARGET_SYSTEM', type='int',
default=0, help='MAVLink target master system')
parser.add_option("--target-component", dest='TARGET_COMPONENT', type='int',
default=0, help='MAVLink target master component')
parser.add_option("--logfile", dest="logfile", help="MAVLink master logfile",
default='mav.tlog')
parser.add_option("-a", "--append-log", dest="append_log", help="Append to log files",
action='store_true', default=False)
parser.add_option("--quadcopter", dest="quadcopter", help="use quadcopter controls",
action='store_true', default=False)
parser.add_option("--setup", dest="setup", help="start in setup mode",
action='store_true', default=False)
parser.add_option("--nodtr", dest="nodtr", help="disable DTR drop on close",
action='store_true', default=False)
parser.add_option("--show-errors", dest="show_errors", help="show MAVLink error packets",
action='store_true', default=False)
parser.add_option("--speech", dest="speech", help="use text to speach",
action='store_true', default=False)
parser.add_option("--aircraft", dest="aircraft", help="aircraft name", default=None)
parser.add_option("--cmd", dest="cmd", help="initial commands", default=None, action='append')
parser.add_option("--console", action='store_true', help="use GUI console")
parser.add_option("--map", action='store_true', help="load map module")
parser.add_option(
'--load-module',
action='append',
default=[],
help='Load the specified module. Can be used multiple times, or with a comma separated list')
parser.add_option("--mav09", action='store_true', default=False, help="Use MAVLink protocol 0.9")
parser.add_option("--auto-protocol", action='store_true', default=False, help="Auto detect MAVLink protocol version")
parser.add_option("--nowait", action='store_true', default=False, help="don't wait for HEARTBEAT on startup")
parser.add_option("-c", "--continue", dest='continue_mode', action='store_true', default=False, help="continue logs")
parser.add_option("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_option("--rtscts", action='store_true', help="enable hardware RTS/CTS flow control")
parser.add_option("--moddebug", type=int, help="module debug level", default=0)
parser.add_option("--mission", dest="mission", help="mission name", default=None)
parser.add_option("--daemon", action='store_true', help="run in daemon mode, do not start interactive shell")
parser.add_option("--profile", action='store_true', help="run the Yappi python profiler")
parser.add_option("--state-basedir", default=None, help="base directory for logs and aircraft directories")
parser.add_option("--version", action='store_true', help="version information")
(opts, args) = parser.parse_args()
# warn people about ModemManager which interferes badly with APM and Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk")
if opts.mav09:
os.environ['MAVLINK09'] = '1'
from pymavlink import mavutil, mavparm
mavutil.set_dialect(opts.dialect)
#version information
if opts.version:
import pkg_resources
version = pkg_resources.require("mavproxy")[0].version
print "MAVProxy is a modular ground station using the mavlink protocol"
print "MAVProxy Version: " + version
sys.exit(1)
# global mavproxy state
mpstate = MPState()
mpstate.status.exit = False
mpstate.command_map = command_map
mpstate.continue_mode = opts.continue_mode
# queues for logging
mpstate.logqueue = Queue.Queue()
mpstate.logqueue_raw = Queue.Queue()
if opts.speech:
# start the speech-dispatcher early, so it doesn't inherit any ports from
# modules/mavutil
load_module('speech')
if not opts.master:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
print('Auto-detected serial ports are:')
for port in serial_list:
print("%s" % port)
# container for status information
mpstate.settings.target_system = opts.TARGET_SYSTEM
mpstate.settings.target_component = opts.TARGET_COMPONENT
mpstate.mav_master = []
mpstate.rl = rline.rline("MAV> ", mpstate)
def quit_handler(signum = None, frame = None):
#print 'Signal handler called with signal', signum
if mpstate.status.exit:
print 'Clean shutdown impossible, forcing an exit'
sys.exit(0)
else:
mpstate.status.exit = True
# Listen for kill signals to cleanly shutdown modules
fatalsignals = [signal.SIGTERM]
try:
fatalsignals.append(signal.SIGHUP)
fatalsignals.append(signal.SIGQUIT)
except Exception:
pass
if opts.daemon: # SIGINT breaks readline parsing - if we are interactive, just let things die
fatalsignals.append(signal.SIGINT)
for sig in fatalsignals:
signal.signal(sig, quit_handler)
load_module('link', quiet=True)
mpstate.settings.source_system = opts.SOURCE_SYSTEM
mpstate.settings.source_component = opts.SOURCE_COMPONENT
# open master link
for mdev in opts.master:
if not mpstate.module('link').link_add(mdev):
sys.exit(1)
if not opts.master and len(serial_list) == 1:
print("Connecting to %s" % serial_list[0])
mpstate.module('link').link_add(serial_list[0].device)
elif not opts.master:
wifi_device = '0.0.0.0:14550'
mpstate.module('link').link_add(wifi_device)
# open any mavlink output ports
for port in opts.output:
mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))
if opts.sitl:
mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)
mpstate.settings.streamrate = opts.streamrate
mpstate.settings.streamrate2 = opts.streamrate
if opts.state_basedir is not None:
mpstate.settings.state_basedir = opts.state_basedir
msg_period = mavutil.periodic_event(1.0/15)
heartbeat_period = mavutil.periodic_event(1)
heartbeat_check_period = mavutil.periodic_event(0.33)
mpstate.input_queue = Queue.Queue()
mpstate.input_count = 0
mpstate.empty_input_count = 0
if opts.setup:
mpstate.rl.set_prompt("")
# call this early so that logdir is setup based on --aircraft
(mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()
if not opts.setup:
# some core functionality is in modules
standard_modules = ['log', 'wp', 'rally','fence','param','relay',
'tuneopt','arm','mode','calibration','rc','auxopt','misc','cmdlong',
'battery','terrain','output']
for m in standard_modules:
load_module(m, quiet=True)
if 'HOME' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['HOME'], ".mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if 'LOCALAPPDATA' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy", "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if opts.aircraft is not None:
start_script = os.path.join(opts.aircraft, "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
else:
print("no script %s" % start_script)
if opts.console:
process_stdin('module load console')
if opts.map:
process_stdin('module load map')
for module in opts.load_module:
modlist = module.split(',')
for mod in modlist:
process_stdin('module load %s' % mod)
if opts.cmd is not None:
for cstr in opts.cmd:
cmds = cstr.split(';')
for c in cmds:
process_stdin(c)
if opts.profile:
import yappi # We do the import here so that we won't barf if run normally and yappi not available
yappi.start()
# log all packets from the master, for later replay
open_telemetry_logs(logpath_telem, logpath_telem_raw)
# run main loop as a thread
mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')
mpstate.status.thread.daemon = True
mpstate.status.thread.start()
# use main program for input. This ensures the terminal cleans
# up on exit
while (mpstate.status.exit != True):
try:
if opts.daemon:
time.sleep(0.1)
else:
input_loop()
except KeyboardInterrupt:
if mpstate.settings.requireexit:
print("Interrupt caught. Use 'exit' to quit MAVProxy.")
#Just lost the map and console, get them back:
for (m,pm) in mpstate.modules:
if m.name in ["map", "console"]:
if hasattr(m, 'unload'):
try:
m.unload()
except Exception:
pass
reload(m)
m.init(mpstate)
else:
mpstate.status.exit = True
sys.exit(1)
if opts.profile:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
#this loop executes after leaving the above loop and is for cleanup on exit
for (m,pm) in mpstate.modules:
if hasattr(m, 'unload'):
print("Unloading module %s" % m.name)
m.unload()
sys.exit(1)
|
gpl-3.0
|
meduz/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
58
|
1803
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
amirkdv/biseqt
|
experiments/num_seeds.py
|
1
|
14075
|
#!/usr/bin/env python
from matplotlib import pyplot as plt
from matplotlib import gridspec
import sys
from time import time
import numpy as np
from biseqt.sequence import Alphabet
from biseqt.stochastics import rand_seq, MutationProcess
from biseqt.seeds import SeedIndex
from biseqt.blot import band_radius, band_radii, H0_moments, H1_moments
from util import seq_pair, color_code
from util import plot_with_sd, with_dumpfile, log, savefig
from logging import WARN
@with_dumpfile
def sim_count_seeds(**kw):
ns, n_samples = kw['ns'], kw['n_samples']
gap, subst, wordlen = kw['gap'], kw['subst'], kw['wordlen']
log('simulating # of seeds (%d samples), lengths = %s' %
(n_samples, str(ns)))
def _zero(): return np.zeros((len(ns), n_samples))
A = Alphabet('ACGT')
seed_index_kw = {
'alphabet': A,
'wordlen': wordlen,
'path': kw.get('path', ':memory:'),
'log_level': WARN,
}
sim_data = {
'time': {'pos': _zero(), 'neg': _zero()},
'n_seeds': {'all': {'pos': _zero(), 'neg': _zero()},
'band': {'pos': _zero(), 'neg': _zero()}},
'gap': gap,
'match': (1 - gap) * (1 - subst),
'ns': ns,
'seed_index_kw': seed_index_kw,
}
M = MutationProcess(A, go_prob=gap, ge_prob=gap, subst_probs=subst)
for n_idx, n in enumerate(ns):
radius = band_radius(n, .4, 1 - 1e-4)
log('n = %d ' % n, newline=False)
for idx in range(n_samples):
sys.stderr.write('.')
S_rel, T_rel = seq_pair(n, A, mutation_process=M)
S_urel, T_urel = rand_seq(A, n), rand_seq(A, n)
for key, (S, T) in zip(['pos', 'neg'],
[(S_rel, T_rel), (S_urel, T_urel)]):
t = time()
seed_index = SeedIndex(S, T, **seed_index_kw)
n_seeds = seed_index.seed_count()
sim_data['time'][key][n_idx][idx] = time() - t
sim_data['n_seeds']['all'][key][n_idx][idx] = n_seeds
n_seeds = seed_index.seed_count(d_band=(-radius, radius))
sim_data['n_seeds']['band'][key][n_idx][idx] = n_seeds
sys.stderr.write('\n')
return sim_data
def plot_count_seeds_moments(sim_data, K=None, suffix=''):
ns, wordlen = sim_data['ns'], sim_data['seed_index_kw']['wordlen']
match = sim_data['match']
def _zero(): return {'all': [], 'band': []}
mus_H0, mus_H1, sds_H0, sds_H1 = _zero(), _zero(), _zero(), _zero()
for n in ns:
for mode in ['all', 'band']:
if mode == 'all':
area = n ** 2
else:
radius = band_radius(n, .4, 1 - 1e-4)
area = n * 2 * radius
mu_H0, sd_H0 = H0_moments(4, wordlen, area)
mus_H0[mode].append(mu_H0)
sds_H0[mode].append(sd_H0)
mu_H1, sd_H1 = H1_moments(4, wordlen, area, n, match)
mus_H1[mode].append(mu_H1)
sds_H1[mode].append(sd_H1)
fig = plt.figure(figsize=(11, 5))
ax_t = fig.add_subplot(1, 3, 1)
ax_mu = fig.add_subplot(1, 3, 2)
ax_sd = fig.add_subplot(1, 3, 3)
# time to find all seeds
kw = {'marker': 'o', 'markersize': 4, 'lw': 1, 'alpha': .8}
plot_with_sd(ax_t, ns, 1000 * sim_data['time']['neg'], axis=1, color='r',
label='unrelated', **kw)
plot_with_sd(ax_t, ns, 1000 * sim_data['time']['pos'], axis=1, color='g',
label='related', **kw)
kw = {'markersize': 3, 'lw': 1.2}
for mode, marker, alpha in zip(['all', 'band'], 'ox', [.7, .5]):
kw['alpha'] = alpha
kw['marker'] = marker
pos = sim_data['n_seeds'][mode]['pos']
neg = sim_data['n_seeds'][mode]['neg']
# average no. of seeds
kw['color'] = 'r'
ax_mu.plot(ns, neg.mean(axis=1), label='unrelated (%s)' % mode, **kw)
ax_mu.plot(ns, mus_H0[mode], ls='--', **kw)
kw['color'] = 'g'
ax_mu.plot(ns, pos.mean(axis=1), label='related (%s)' % mode, **kw)
ax_mu.plot(ns, mus_H1[mode], ls='--', **kw)
# std dev. of no of seeds
kw['color'] = 'r'
sds_emp = np.sqrt(neg.var(axis=1))
ax_sd.plot(ns, sds_emp, label='unrelated (%s)' % mode, **kw)
ax_sd.plot(ns, sds_H0[mode], ls='--', **kw)
kw['color'] = 'g'
sds_emp = np.sqrt(pos.var(axis=1))
ax_sd.plot(ns, sds_emp, label='related (%s)' % mode, **kw)
ax_sd.plot(ns, sds_H1[mode], ls='--', **kw)
_ns = np.arange(min(ns), max(ns))
ax_mu.plot(_ns, _ns, color='k', alpha=.9, lw=.5, ls='--')
ax_t.plot(_ns, _ns, color='k', alpha=.9, lw=.5, ls='--')
for ax in [ax_sd, ax_mu, ax_t]:
# ax.set_xlim(None, 1.1 * max(ns))
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('sequence length')
ax.set_xticks(ns)
ax.set_xticklabels(ns, rotation=90)
ax.legend(loc='lower right', fontsize=8)
ax_sd.set_ylabel('standard deviation of no. of matching %d-mers' % wordlen)
ax_mu.set_ylabel('average no. of matching %d-mers' % wordlen)
ax_t.set_ylabel('time to find matching %d-mers (ms)' % wordlen)
# n_samples = pos.shape[1]
savefig(fig, 'num_seeds_moments%s.png' % suffix)
def exp_count_seeds():
"""Shows theoretical and simulation results for the mean and variance of
the number of exactly matching kmers between related and unrelated
sequences as a function of sequence length. Theoretical predictions are
based on *m-dependent Central Limit Theorem* which suggests a limiting
Normal distribution with mean and variance given by
:func:`biseqt.blot.H0_moments` and :func:`biseqt.blot.H1_moments`.
**Supported Claims**
* The theoretical calculations of mean and variance for the number of
seeds, given by :func:`biseqt.blot.H0_moments` and
      :func:`biseqt.blot.H1_moments` agree with simulations at least up to 25kbp
sequence lengths.
* Although the expected number of seeds is technically quadratic, the
highest order coefficient is so small that it can be considered
effectively linear in sequence length. Furthermore, we note that word
      length provides exponentially strong control on the number of seeds as
      sequence lengths increase; hence maintaining a small quadratic
      coefficient across biologically relevant sequence lengths (up to 1 Gbp)
      is feasible with reasonable word lengths (up to 30).
.. figure::
https://www.dropbox.com/s/fkd6u6gec6rzrjm/
num_seeds_moments%5Bw%3D8%5D.png?raw=1
:target:
https://www.dropbox.com/s/fkd6u6gec6rzrjm/
num_seeds_moments%5Bw%3D8%5D.png?raw=1
:alt: lightbox
Time to find all exactly matching 8-mers (*left*) for related (*green*)
and unrelated (*red*) sequences of varying lengths (n=50 samples for
each length; shaded regions show one standard deviation). Related
sequences are mutations of each other with substitution and gap
probabilities both equal to 0.1. For the same simulations, mean
(*middle*) and standard deviation (*right*) of the number of seeds as a
function of sequence length are shown (solid lines) with theoretical
predictions for each case (dashed lines). All axes are in log scale, and
the dotted black lines in the left and middle plots are y=x lines for
comparison.
"""
ns = [200 * 2 ** i for i in range(8)]
gap = .1
subst = .1
n_samples = 50
wordlen = 8
suffix = '[w=%d]' % wordlen
dumpfile = 'num_seeds%s.txt' % suffix
sim_data = sim_count_seeds(
ns=ns, n_samples=n_samples, gap=gap, subst=subst,
wordlen=wordlen, dumpfile=dumpfile, ignore_existing=False)
plot_count_seeds_moments(sim_data, suffix=suffix)
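# Minimal usage sketch (illustrative only, not called by the experiment): the
# theoretical seed-count moments for a single configuration, using the same
# helpers as the plots above; the default values of n, wordlen and match are
# arbitrary examples.
def example_seed_count_moments(n=1000, wordlen=8, match=.81):
    radius = band_radius(n, .4, 1 - 1e-4)
    area = n * 2 * radius
    mu_H0, sd_H0 = H0_moments(4, wordlen, area)
    mu_H1, sd_H1 = H1_moments(4, wordlen, area, n, match)
    return {'H0': (mu_H0, sd_H0), 'H1': (mu_H1, sd_H1)}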
@with_dumpfile
def sim_count_seeds_segment(**kw):
Ks, g_radii, n_samples = kw['Ks'], kw['g_radii'], kw['n_samples']
gap, subst, wordlen = kw['gap'], kw['subst'], kw['wordlen']
def _zero(): return np.zeros((len(Ks), len(g_radii), n_samples))
A = Alphabet('ACGT')
seed_index_kw = {
'alphabet': A,
'wordlen': wordlen,
'path': kw.get('path', ':memory:'),
'log_level': WARN,
}
sim_data = {
'n_seeds': {'pos': _zero(), 'neg': _zero()},
'p_hat': {'pos': _zero(), 'neg': _zero()},
'gap': gap,
'match': (1 - gap) * (1 - subst),
'Ks': Ks,
'g_radii': g_radii,
'seed_index_kw': seed_index_kw,
}
M = MutationProcess(A, go_prob=gap, ge_prob=gap, subst_probs=subst)
for K_idx, K in enumerate(Ks):
log('K = %d' % K, newline=False)
for idx in range(n_samples):
sys.stderr.write('.')
S_rel, T_rel = seq_pair(K, A, mutation_process=M)
S_urel, T_urel = rand_seq(A, K), rand_seq(A, K)
for key, (S, T) in zip(['pos', 'neg'],
[(S_rel, T_rel), (S_urel, T_urel)]):
seed_index = SeedIndex(S, T, **seed_index_kw)
for g_idx, g_max in enumerate(g_radii):
radius = band_radius(K, g_max, 1 - 1e-4)
d_band = (-radius, radius)
n_seeds = seed_index.seed_count(d_band=d_band)
sim_data['n_seeds'][key][K_idx, g_idx, idx] = n_seeds
area = 2 * radius * K
word_p_null = (1./len(A)) ** wordlen
word_p = (n_seeds - area * word_p_null) / K
try:
p_hat = np.exp(np.log(word_p) / wordlen)
except Warning:
# presumably this happened because word_p was too small
p_hat = 0
p_hat = min(p_hat, 1)
sim_data['p_hat'][key][K_idx, g_idx, idx] = p_hat
sys.stderr.write('\n')
return sim_data
def plot_count_seeds_segment(sim_data, suffix=''):
Ks, g_radii = sim_data['Ks'], sim_data['g_radii']
match = sim_data['match']
fig = plt.figure(figsize=(10, 4))
grids = gridspec.GridSpec(1, 2, width_ratios=[5, 3])
ax_p = fig.add_subplot(grids[0])
ax_rad = fig.add_subplot(grids[1])
pad = min(Ks) / 3
colors = color_code(g_radii)
arrow_kw = {'marker': '>', 'c': 'k', 'markevery': 2, 'markersize': 2,
'lw': 1, 'alpha': .8}
ax_p.plot([Ks[0] - .2 * pad, Ks[0] - .9 * pad], [match, match], **arrow_kw)
ax_p.plot([Ks[0] - .2 * pad, Ks[0] - .9 * pad], [.25, .25], ls='--',
**arrow_kw)
kw_rad = {'marker': 'o', 'markersize': 3, 'lw': 1, 'alpha': .8}
kw_p = {'marker': 'o', 'markersize': 5, 'lw': 3, 'alpha': .5}
for g_idx, (g_max, color) in enumerate(zip(g_radii, colors)):
label = '$g_{\max} = %.2f$' % g_max
pos = sim_data['p_hat']['pos'][:, g_idx, :]
neg = sim_data['p_hat']['neg'][:, g_idx, :]
plot_with_sd(ax_p, Ks, neg, axis=1, color=color, ls='--', **kw_p)
plot_with_sd(ax_p, Ks, pos, axis=1, color=color, label=label, **kw_p)
ax_rad.plot(Ks, band_radii(Ks, g_max, 1 - 1e-4), color=color,
label=label, **kw_rad)
ax_p.set_ylim(-.2, 1.1)
for ax in [ax_p, ax_rad]:
ax.set_xlim(Ks[0] - pad, Ks[-1] + pad)
ax.set_xscale('log')
ax.set_xlabel('similarity length')
ax.set_xticks(Ks)
ax.set_xticklabels(Ks)
ax.legend(loc='best')
ax_p.set_ylabel('estimated match probability')
ax_rad.set_ylabel('diagonal band radius')
# n_samples = pos.shape[1]
savefig(fig, 'num_seeds_segment%s.png' % suffix)
def exp_count_seeds_segment():
"""Shows simulation results for alignment-free estimated match probability
for globally homologous sequence pairs of length up to 25kbp.
**Supported Claims**
* The match probability estimator provided by
:func:`biseqt.blot.WordBlot.estimate_match_probability` using the band
radius provided by :func:`biseqt.blot.band_radius` are accurate for
similarities of lengths up to 25kbp regardless of gap probability upper
bound. Therefore, we can justify using generous overestimates of gap
probability (e.g. :math:`g_{\max}=0.4`) in typical contexts.
* For unrelated sequences our estimator reports a number close to .25 (one
standard deviation range :math:`[0, .4]`).
.. figure::
https://www.dropbox.com/s/gnvb8eiiezyysuq/
num_seeds_segment%5Bw%3D6%5D.png?raw=1
:target:
https://www.dropbox.com/s/gnvb8eiiezyysuq/
num_seeds_segment%5Bw%3D6%5D.png?raw=1
:alt: lightbox
For multiple values of maximum allowed gap probability :math:`g_{\max}`
(which dictates diagonal band radius), estimated match probability
(using word length 6) is shown as a function of sequence length (*left*)
for globally homologous sequences (solid lines) and unrelated sequences
(dashed lines), n=50 samples, shaded regions show one standard
deviation. Homologous sequences were simulated by mutations with gap
probability 0.1 and substitution probability 0.15 (hence a match
probability of 0.77 indicated by a solid arrow (note agreement with
Word-Blot estimation), the dashed arrow shows the 0.25 point). For each
value of :math:`g_{\max}`, the corresponding band radius is shown as a
function of similarity length (right). Horizontal axes in both plots is
in log scale.
"""
Ks = [200 * 2 ** i for i in range(8)]
g_radii = [.05, .1, .2, .4]
gap = .1
subst = .15
n_samples = 50
wordlen = 6
suffix = '[w=%d]' % wordlen
dumpfile = 'num_seeds_segment%s.txt' % suffix
sim_data = sim_count_seeds_segment(
Ks=Ks, g_radii=g_radii, n_samples=n_samples,
gap=gap, subst=subst, wordlen=wordlen,
dumpfile=dumpfile)
plot_count_seeds_segment(sim_data, suffix=suffix)
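# Illustrative sketch (not used by the experiments above): turn a seed count
# observed in a diagonal band into an estimated match probability, mirroring
# the inline calculation in sim_count_seeds_segment; all arguments are
# hypothetical example values supplied by the caller.
def example_estimate_match_probability(n_seeds, K, radius, wordlen, alphabet_len=4):
    area = 2 * radius * K
    word_p_null = (1. / alphabet_len) ** wordlen
    word_p = (n_seeds - area * word_p_null) / K
    if word_p <= 0:
        return 0.
    p_hat = np.exp(np.log(word_p) / wordlen)
    return min(p_hat, 1)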
if __name__ == '__main__':
exp_count_seeds()
exp_count_seeds_segment()
|
bsd-3-clause
|
reychil/project-alpha-1
|
code/utils/scripts/pca_script.py
|
1
|
1857
|
"""
Script to run PCA on the BOLD data of each subject. After dropping the
non-standardized initial TRs, PCA is performed on the voxel covariance matrix
(data_2d.T.dot(data_2d)) and the explained variance ratio of the first 20
components is plotted and saved for each subject.
"""
import numpy as np
import nibabel as nib
import os
import sys
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from event_related_fMRI_functions import hrf_single, convolution_specialized
from noise_correction import mean_underlying_noise, fourier_predict_underlying_noise
sub_list = os.listdir(path_to_data)[0:2]
# saving to compare number of cuts in the beginning
num_cut=np.zeros(len(sub_list))
i=0
# Loop through all the subjects.
for name in sub_list:
# amount of beginning TRs not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
num_cut[i]=num_TR_cut
i+=1
data = data[...,num_TR_cut:]
data_2d = data.reshape((-1,data.shape[-1]))
# Run PCA on the covariance matrix and plot explained variance.
pca = PCA(n_components=20)
pca.fit(data_2d.T.dot(data_2d))
exp_var = pca.explained_variance_ratio_
plt.plot(range(1,21), exp_var)
plt.savefig(location_of_images+'pca'+name+'.png')
plt.close()
|
bsd-3-clause
|
fabianp/scikit-learn
|
sklearn/preprocessing/__init__.py
|
268
|
1319
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
|
bsd-3-clause
|
MadsJensen/RP_scripts
|
graph_pagerank_ada_post.py
|
1
|
2154
|
import numpy as np
import bct
from sklearn.externals import joblib
from my_settings import (bands, source_folder)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.cross_validation import (StratifiedKFold, cross_val_score)
from sklearn.grid_search import GridSearchCV
subjects = [
"0008", "0009", "0010", "0012", "0014", "0015", "0016", "0017", "0018",
"0019", "0020", "0021", "0022"
]
cls_all = []
pln_all = []
scores_all = np.empty([4, 6])
for subject in subjects:
cls = np.load(source_folder + "graph_data/%s_classic_pow_post.npy" %
subject).item()
pln = np.load(source_folder + "graph_data/%s_plan_pow_post.npy" %
subject).item()
cls_all.append(cls)
pln_all.append(pln)
for k, band in enumerate(bands.keys()):
data_cls = []
for j in range(len(cls_all)):
tmp = cls_all[j][band]
data_cls.append(
np.asarray(
[bct.centrality.pagerank_centrality(
g, d=0.85) for g in tmp]).mean(axis=0))
data_pln = []
for j in range(len(pln_all)):
tmp = pln_all[j][band]
data_pln.append(
np.asarray(
[bct.centrality.pagerank_centrality(
g, d=0.85) for g in tmp]).mean(axis=0))
data_cls = np.asarray(data_cls)
data_pln = np.asarray(data_pln)
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedKFold(y, n_folds=6, shuffle=True)
cv_params = {
"learning_rate": np.arange(0.1, 1.1, 0.1),
'n_estimators': np.arange(1, 80, 2)
}
grid = GridSearchCV(
AdaBoostClassifier(),
cv_params,
scoring='accuracy',
cv=cv,
n_jobs=1,
verbose=1)
grid.fit(X, y)
ada_cv = grid.best_estimator_
scores = cross_val_score(ada_cv, X, y, cv=cv)
scores_all[k, :] = scores
# save the classifier
joblib.dump(
ada_cv,
source_folder + "graph_data/sk_models/pagerank_ada_post_%s.plk" % band)
np.save(source_folder + "graph_data/pagerank_scores_all_post.npy", scores_all)
|
bsd-3-clause
|
zooniverse/aggregation
|
experimental/algorithms/jungle3.py
|
1
|
5210
|
__author__ = 'ggdhines'
import matplotlib
matplotlib.use('WXAgg')
import aggregation_api
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from scipy.spatial import distance
ref = [[234,218,209],]
def analyze(f_name,display=False):
image = cv2.imread(f_name)
x_lim,y_lim,_ = image.shape
a = image.reshape(x_lim*y_lim,3)
dist = distance.cdist(a,ref).reshape(x_lim,y_lim)
y_pts,x_pts = np.where(dist>50)
pts = zip(x_pts,y_pts)
pts.sort(key = lambda p:p[0])
print "here"
current_x = pts[0][0]
# clusters = [[pts[0][1],],]
to_plot = {}
to_cluster_y = []
for (x,y) in pts:
if x == current_x:
to_cluster_y.append(y)
else:
to_cluster_y.sort()
to_plot[current_x] = [[to_cluster_y[0]],]
for p in to_cluster_y[1:]:
# print to_plot
if (p - to_plot[current_x][-1][-1]) > 2:
to_plot[current_x].append([p])
else:
to_plot[current_x][-1].append(p)
to_cluster_y = []
current_x = x
filtered_x = []
filtered_y = []
for x in to_plot:
# print to_plot[x]
values = []
for c in to_plot[x]:
# values.extend(c)
if 1 < len(c) < 10:
plt.plot([x for _ in c],c,".")
filtered_x.extend([x for _ in c])
filtered_y.extend([[i,] for i in c])
plt.ylim((max(y_pts),0))
plt.show()
filtered_x = np.asarray(filtered_x)
filtered_y = np.asarray(filtered_y)
db = DBSCAN(eps=1, min_samples=15).fit(filtered_y)
labels = db.labels_
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
print len(unique_labels)
for k, col in zip(unique_labels, colors):
if k == -1:
continue
index_filter = np.where(labels == k)
x_val = filtered_x[index_filter]
# print len(x_val)
# if len(x_val) < 20:
# continue
percentage = len(x_val)/float(max(x_val)-min(x_val))
# if percentage <0.1:
# continue
#
# if len(x_val) < 3:
# continue
y_val = [f[0] for f in filtered_y[index_filter]]
plt.plot(x_val, y_val, 'o', markerfacecolor=col)
plt.ylim((max(y_pts),0))
plt.show()
# for x in range(min(x_pts),max(x_pts)):
# print x
# id_ = np.where(x_pts==x)
# # restricted_y = [[p,] for p in y_pts[id_]]
# restricted_y = y_pts[id_]
#
# clusters = [[restricted_y[0],],]
# for y in restricted_y[1:]:
# if y-clusters[-1][-1] <= 2:
# clusters[-1].append(y)
# else:
# clusters.append([y])
#
# for c in clusters:
# plt.plot([x for _ in c],c,"o")
# continue
#
# db = DBSCAN(eps=4, min_samples=1).fit(restricted_y)
# labels = db.labels_
#
# unique_labels = set(labels)
# colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
# for k, col in zip(unique_labels, colors):
# if k == -1:
# continue
#
# class_member_mask = (labels == k)
# # print class_member_mask
#
#
# y_cluster = temp_y[class_member_mask]
#
# if len(y_cluster) < 20:
# plt.plot([x for _ in range(len(y_cluster))], y_cluster, 'o', markerfacecolor=col)
# # xPts.extend([x for _ in range(len(y))])
# # yPts.extend(y)
# plt.show()
#
# if display:
# plt.plot(x,y,".")
# plt.ylim((y_lim,0))
# plt.show()
#
# return
#
# n, bins, patches = plt.hist(y,range(min(y),max(y)+1))
# med = np.median(n)
#
#
# peaks = [i for i,count in enumerate(n) if count > 2*med]
# buckets = [[peaks[0]]]
#
#
#
#
# for j,p in list(enumerate(peaks))[1:]:
# if (p-1) != (peaks[j-1]):
# buckets.append([p])
# else:
# buckets[-1].append(p)
#
# bucket_items = list(enumerate(buckets))
# bucket_items.sort(key = lambda x:len(x[1]),reverse=True)
#
# a = bucket_items[0][0]
# b = bucket_items[1][0]
#
#
# vert = []
# for x,p in bucket_items:
# if (a <= x <= b) or (b <= x <= a):
# vert.extend(p)
#
# print vert
# print min(vert)
# print max(vert)
#
# if display:
# plt.plot((min(y),max(y)+1),(2*med,2*med))
# plt.show()
#
# n, bins, patches = plt.hist(x,range(min(x),max(x)+1))
# med = np.median(n)
#
# plt.plot((min(x),max(x)+1),(2*med,2*med))
# plt.plot((min(x),max(x)+1),(1*med,1*med),color="red")
# plt.xlim((0,max(x)+1))
# plt.show()
if __name__ == "__main__":
# project = aggregation_api.AggregationAPI(153,"development")
# f_name = project.__image_setup__(1125393)
f_name = "/home/ggdhines/Databases/images/e1d11279-e515-42f4-a4d9-8e8a40a28425.jpeg"
analyze(f_name,display=True)
|
apache-2.0
|
zuku1985/scikit-learn
|
examples/cluster/plot_birch_vs_minibatchkmeans.py
|
333
|
3694
|
"""
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
|
bsd-3-clause
|
giacomov/astromodels
|
setup.py
|
2
|
10628
|
#!/usr/bin/env python
import ctypes.util
import glob
import sys
import os
import re
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
# This is needed to use numpy in this module, and should work whether or not numpy is
# already installed. If it's not, it will trigger an installation
class My_build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
self.include_dirs.append('astromodels/xspec/include')
def sanitize_lib_name(library_path):
"""
Get a fully-qualified library name, like /usr/lib/libgfortran.so.3.0, and returns the lib name needed to be
passed to the linker in the -l option (for example gfortran)
:param library_path:
:return:
"""
lib_name = os.path.basename(library_path)
# Some regexp magic needed to extract in a system-independent (mac/linux) way the library name
tokens = re.findall("lib(.+)(\.so|\.dylib|\.a)(.+)?", lib_name)
if not tokens:
raise RuntimeError('Attempting to find %s in directory %s but there are no libraries in this directory'%(lib_name,library_path))
return tokens[0][0]
def find_library(library_root, additional_places=None):
"""
Returns the name of the library without extension
:param library_root: root of the library to search, for example "cfitsio_" will match libcfitsio_1.2.3.4.so
:return: the name of the library found (NOTE: this is *not* the path), and a directory path if the library is not
in the system paths (and None otherwise). The name of libcfitsio_1.2.3.4.so will be cfitsio_1.2.3.4, in other words,
it will be what is needed to be passed to the linker during a c/c++ compilation, in the -l option
"""
# find_library searches for all system paths in a system independent way (but NOT those defined in
# LD_LIBRARY_PATH or DYLD_LIBRARY_PATH)
first_guess = ctypes.util.find_library(library_root)
if first_guess is not None:
# Found in one of the system paths
if sys.platform.lower().find("linux") >= 0:
# On linux the linker already knows about these paths, so we
# can return None as path
return sanitize_lib_name(first_guess), None
elif sys.platform.lower().find("darwin") >= 0:
# On Mac we still need to return the path, because the linker sometimes
# does not look into it
return sanitize_lib_name(first_guess), os.path.dirname(first_guess)
else:
# Windows is not supported
raise NotImplementedError("Platform %s is not supported" % sys.platform)
else:
        # could not find it. Let's examine LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
        # (if they are not defined, possible_locations will become [""] which will
        # be handled by the next loop)
if sys.platform.lower().find("linux") >= 0:
# Unix / linux
possible_locations = os.environ.get("LD_LIBRARY_PATH", "").split(":")
elif sys.platform.lower().find("darwin") >= 0:
# Mac
possible_locations = os.environ.get("DYLD_LIBRARY_PATH", "").split(":")
else:
raise NotImplementedError("Platform %s is not supported" % sys.platform)
if additional_places is not None:
possible_locations.extend(additional_places)
# Now look into the search paths
library_name = None
library_dir = None
for search_path in possible_locations:
if search_path == "":
                # This can happen if there is more than one ':', or if neither LD_LIBRARY_PATH
                # nor DYLD_LIBRARY_PATH is defined (because of the default "" used above with os.environ.get)
continue
results = glob.glob(os.path.join(search_path, "lib%s*" % library_root))
if len(results) >= 1:
# Results contain things like libXS.so, libXSPlot.so, libXSpippo.so
# If we are looking for libXS.so, we need to make sure that we get the right one!
for result in results:
if re.match("lib%s[\-_\.]" % library_root, os.path.basename(result)) is None:
continue
else:
# FOUND IT
# This is the full path of the library, like /usr/lib/libcfitsio_1.2.3.4
library_name = result
library_dir = search_path
break
else:
continue
if library_name is not None:
break
if library_name is None:
return None, None
else:
# Sanitize the library name to get from the fully-qualified path to just the library name
# (/usr/lib/libgfortran.so.3.0 becomes gfortran)
return sanitize_lib_name(library_name), library_dir
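# Illustrative usage sketch (not executed during setup): look for cfitsio in the
# system paths plus a hypothetical extra directory and turn the result into the
# keyword arguments an Extension would need.
def example_find_cfitsio(extra_dir='/opt/local/lib'):
    lib_name, lib_dir = find_library('cfitsio', additional_places=[extra_dir])
    if lib_name is None:
        return None
    # lib_dir is None when the linker already knows where the library lives
    return {'libraries': [lib_name],
            'library_dirs': [lib_dir] if lib_dir is not None else []}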
# Get the version number
vfilename = "astromodels/version.py"
exec(compile(open(vfilename, "rb").read(), vfilename, 'exec'))
#execfile('astromodels/version.py')
def setup_xspec():
headas_root = os.environ.get("HEADAS")
if headas_root is None:
# See, maybe we are running in Conda
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
# Maybe this is a Conda build
conda_prefix = os.environ.get("PREFIX")
if conda_prefix is not None:
# Yes, this is Conda
# Let's see if the package xspec-modelsonly has been installed by checking whether one of the Xspec
# libraries exists within conda
conda_lib_path = os.path.join(conda_prefix, 'lib')
this_lib, this_lib_path = find_library('XSFunctions', additional_places=[conda_lib_path])
if this_lib is None:
# No, there is no library in Conda
print("No xspec-modelsonly package has been installed in Conda. Xspec support will not be installed")
print("Was looking into %s" % conda_lib_path)
return None
else:
print("The xspec-modelsonly package has been installed in Conda. Xspec support will be installed")
# Set up the HEADAS variable so that the following will find the libraries
headas_root = conda_prefix
else:
print("No HEADAS env. variable set. Xspec support will not be installed ")
return None
else:
print("\n Xspec is detected. Will compile the Xspec extension.\n")
# Make sure these libraries exist and are linkable right now
# (they need to be in LD_LIBRARY_PATH or DYLD_LIBRARY_PATH or in one of the system paths)
libraries_root = ['XSFunctions', 'XSModel', 'XSUtil', 'XS', 'cfitsio', 'CCfits', 'wcs', 'gfortran']
libraries = []
library_dirs = []
for lib_root in libraries_root:
this_library, this_library_path = find_library(lib_root, additional_places=[os.path.join(headas_root, 'lib')])
if this_library is None:
raise IOError("Could not find library %s. Impossible to compile Xspec" % lib_root)
else:
print("Found library %s in %s" % (this_library, this_library_path))
libraries.append(this_library)
if this_library_path is not None:
# This library is not in one of the system path library, we need to add
# it to the -L flag during linking. Let's put it in the library_dirs list
# which will be used in the Extension class
library_dirs.append(this_library_path)
# Remove duplicates from library_dirs
library_dirs = list(set(library_dirs))
# Configure the variables to build the external module with the C/C++ wrapper
ext_modules_configuration = [
Extension("astromodels.xspec._xspec",
["astromodels/xspec/src/_xspec.cc", ],
libraries=libraries,
library_dirs=library_dirs,
runtime_library_dirs=library_dirs,
extra_compile_args=[])]
return ext_modules_configuration
# Normal packages
packages = ['astromodels',
'astromodels/core',
'astromodels/functions',
'astromodels/functions/dark_matter',
'astromodels/sources',
'astromodels/utils',
'astromodels/xspec',
'astromodels/tests'
]
# Check whether we can compile Xspec support
ext_modules_configuration = setup_xspec()
# Add the node_ctype module
# This defines the external module
node_ctype_ext = Extension('astromodels.core.node_ctype',
sources = ['astromodels/core/node_ctype/node_ctype.cxx'],
extra_compile_args=[]) # '-UNDEBUG' for debugging
if ext_modules_configuration is None:
# No Xspec
ext_modules_configuration = [node_ctype_ext]
else:
ext_modules_configuration.append(node_ctype_ext)
setup(
name="astromodels",
setup_requires=['numpy'],
cmdclass={'build_ext': My_build_ext},
packages=packages,
data_files=[('astromodels/data/functions', glob.glob('astromodels/data/functions/*.yaml'))],
# The __version__ comes from the exec at the top
version=__version__,
description="Astromodels contains models to be used in likelihood or Bayesian analysis in astronomy",
author='Giacomo Vianello',
author_email='[email protected]',
url='https://github.com/giacomov/astromodels',
download_url='https://github.com/giacomov/astromodels/archive/v0.1',
keywords=['Likelihood', 'Models', 'fit'],
classifiers=[],
install_requires=[
'numpy >= 1.6',
'PyYAML',
'astropy >= 1.2',
'scipy>=0.14',
'numdifftools',
'tables',
'pandas',
'dill'],
extras_require={
'tests': [
'pytest', ],
'docs': [
'sphinx >= 1.4',
'sphinx_rtd_theme',
'nbsphinx',
'sphinx-autoapi']},
ext_modules=ext_modules_configuration,
package_data={
'astromodels': ['data/dark_matter/*'],
},
include_package_data=True,
)
|
bsd-3-clause
|
drpngx/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
46
|
13101
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
vavuq/vavuq
|
VAVUQ.py
|
1
|
57168
|
#!/usr/bin/env python
"""
VAVUQ (Verification And Validation and Uncertainty Quantification) can
be used as a general purpose program for verification, validation, and
uncertainty quantification. The motivation for the creation of and
continued development of the program is to provide a cost effective and
easy way to assess the quality of computational approximations. The hope
is that the methods used in this program can be applied to fields such
as civil engineering where they are currently often underutilized. This
code was created through efforts from Bombardelli's group and Dr. Bill
Fleenor at the University of California Davis. The creators belong to
the Civil & Environmental Engineering (CEE) Department, the Center for
Watershed Sciences (CWS), and the Delta Solution Team (DST). The main
code development is headed by Kaveh Zamani and James E. Courtney.
==============================================================================
Reference: please see "Verification and Validation in Scientific
Computing" by William L. Oberkampf and Christopher J. Roy, Chapter 8, or
Zamani, K., Bombardelli, F. A. (2014) "Analytical solutions of nonlinear
and variable-parameter transport equations for verification of numerical
solvers." Environmental Fluid Mechanics.
==============================================================================
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# This code uses: Scipy, Pandas, Numpy, Math, Matplotlib, Tkinter
# plot options [1:on, 0:off]
popt = [None]*8
popt[0] = 0 # 3D surface plot (user interaction)
popt[1] = 0 # Triangular 2D surface plot
popt[2] = 0 # Interpolated triangular 2D surface plot
popt[3] = 0 # Image plot (suppressed if unstructured mesh)
popt[4] = 0 # 3D scatter plot (user interaction)
popt[5] = 1 # Rect surface plot
popt[6] = 0 # 2D scatter
popt[7] = 0 # 2D Contour
# execute plot options
def plotmain(popt,x,y,z,titl):
if popt[0]: surfplot(x,y,z,titl)
if popt[1]: trisurfplot(x,y,z,titl)
if popt[2]: triconplot(x,y,z,titl)
if popt[3]: plotim(x,y,z,titl)
if popt[4]: scatplot(x,y,z,titl)
if popt[5]: sqsurfplot(x,y,z,titl)
if popt[6]: scat2d(x,y,z,titl)
if popt[7]: cont2d(x,y,z,titl)
# This function is based on Equation 8.73 in 'Verification and Validation
# in Scientific Computing' by W. Oberkampf and C. Roy on page 320.
# NCS: No Converged Solution
def calc_con(Lcoarse, Lfine, rc, rf):
import numpy as np
import math
p = 0.5
AA = math.log10(rf*rc)
if Lfine == 0.:
return 0. # NCS
BB = abs(Lcoarse/Lfine)
if math.isnan(BB):
return 0. # NCS
i = 1
while True:
if i > 10000:
return 0. # NCS
i += 1
x = (rf**p - 1.0)*BB + rf**p
if x < 0.:
return 0. # NCS
o = p
p = math.log10(x)/AA
if abs(o-p) < 1e-12:
break
return p
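# Added illustrative sketch (not part of the original VAVUQ code; the numbers
# are hypothetical): for a second-order scheme with refinement ratios
# rc = rf = 2, a 4:1 drop in the error norm between the mesh pairs recovers an
# observed order of accuracy close to 2.
def _example_calc_con():
    Lcoarse, Lfine = 4.0e-3, 1.0e-3  # hypothetical error norms on two mesh pairs
    return calc_con(Lcoarse, Lfine, 2.0, 2.0)  # iterates to p ~ 2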
'''
The coarse grid convergence index (GCI) is calculated based on
Roache, P.J. 1994. "Perspective: A Method for Uniform Reporting
of Grid Refinement Studies." Journal of Fluids Engineering,
September 1994, Vol. 116/405.
fe: fractional error
f1: fine grid solution
f2: coarse grid solution
r: refinement ratio
p: order of accuracy
fs: factor of safety'''
def GCI_coarse(f1,f2,r,p,fs):
    return r**p * GCI_fine(f1,f2,r,p,fs)[0]
'''
The fine grid convergence index (GCI) is calculated based on
ASME V&V 20-2009
'''
def GCI_fine(f1,f2,r,p,fs):
# calculate extrapolated value
ex = (r**p*f1 - f2) / (r**p - 1.)
# calculate error estimate
ee = abs((f1 - f2) / (f1 + 1e-12))
# calculate extrapolated relative error
er = abs((ex - f1) / (ex + 1e-12))
# calculate GCI
GCI = (fs*ee) / (r**p - 1.)
return (GCI,er)
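# Added illustrative sketch (hypothetical values, not part of the original VAVUQ
# workflow): estimate the fine- and coarse-grid uncertainty indices and the
# extrapolated relative error for two solution values f1 (fine) and f2 (coarse)
# with refinement ratio r, observed order p, and factor of safety fs.
def _example_gci():
    f1, f2, r, p, fs = 1.02, 1.08, 2.0, 2.0, 1.25
    gci_f, rel_err = GCI_fine(f1, f2, r, p, fs)
    gci_c = GCI_coarse(f1, f2, r, p, fs)  # coarse index is r**p times the fine one
    return gci_f, gci_c, rel_err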
"""
Calculates Statistics Validation
----------------------------------------------------
M is model data or prediction data
O is benchmark or measured data (observed data)
Paper: "Comparison of different efficiency criteria for hydrological
model assessment" by Krause, Boyle and Base (2005)
Nash-Sutcliffe efficiency E: Nash, J. E. and J. V. Sutcliffe (1970),
River flow forecasting through conceptual models part I -A discussion
of principles, Journal of Hydrology, 10 (3), 282-290
"""
def statistics_validation(M,O):
import numpy as np
from scipy.stats import ks_2samp, chisquare
M, O = M.ravel(), O.ravel()
wn = ''
if abs(np.mean(M)) < 0.001:
M, O = M + 1., O + 1.
eo = '***Results are shifted one unit to avoid division by zero***'
wn = ''.join([wn, eo+'\n'])
bias = np.sum(M - O) / O.size
rms = np.sqrt(np.mean(np.square(M - O)))
SI = rms/np.mean(M)
    x2 = M - np.mean(M)
y2 = O - np.mean(O)
R2 = (np.sum(np.multiply(x2, y2)) / \
(np.sum(np.square(x2)) * np.sum(np.square(y2)))**0.5)**2
E = 1. - np.sum(np.square(M-O)) / np.sum(np.square(y2))
    M_arr, O_arr = np.asarray(M).squeeze(), np.asarray(O).squeeze()
    KS = ks_2samp(M_arr, O_arr)
    CH = chisquare(M_arr, f_exp=O_arr)
eo = 'Bias = %(bias)s \n' \
'Scatter Index = %(SI)s \n' \
'RMSE = %(rms)s \n' \
'Coefficient of Determination = %(R2)s \n' \
'NSE = %(E)s \n' % locals()
eo += 'K-S(stat, p-value) = (%0.4e, %0.4e)\n' % (KS[0],KS[1])
eo += 'Chi Sq(stat, p-value) = (%0.4e, %0.4e)' % (CH[0],CH[1])
wn = ''.join([wn, eo+'\n'])
return wn
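# Added illustrative sketch (arbitrary synthetic values, not a VAVUQ test case):
# compare a short "model" series against an "observed" series and return the
# formatted metric summary produced by statistics_validation above.
def _example_statistics_validation():
    import numpy as np
    model = np.matrix([1.0, 2.1, 2.9, 4.2])
    observed = np.matrix([1.0, 2.0, 3.0, 4.0])
    return statistics_validation(model, observed)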
# read input file
def odat(fname):
import numpy as np
ex = fname.split('.')[-1].lower()
try:
if (ex == 'xlsx') | (ex == 'xls'):
from pandas import ExcelFile
with ExcelFile(fname) as sn:
x = [np.matrix(sn.parse(i, header=None).values)
for i, n in enumerate(sn.sheet_names)]
elif ex == 'h5':
from pandas import HDFStore, read_hdf
with HDFStore(fname) as df:
x = [np.matrix(read_hdf(fname, k).values) for k in df.keys()]
elif ex == 'csv':
from pandas import read_csv
df = read_csv(fname, header=None)
x = []
j = 0
for i in range(len(df.columns)/3):
z = np.asarray(df.values)[:,j:j+3]
z = z[~np.any(np.isnan(z), axis=1)]
x.append(np.matrix(z))
j += 3
elif (ex == 'txt') | (ex == 'dat'):
with open(fname) as fn:
icom = fn.readline()
if ',' in icom:
com = True
else:
com = False
with open(fname) as fn:
if not com:
from pandas import read_table
df = read_table(fn, sep='\t', header=None)
else:
from pandas import read_csv
df = read_csv(fn, header=None)
x = []
j = 0
for i in range(len(df.columns)/3):
z = np.asarray(df.values)[:,j:j+3]
z = z[~np.any(np.isnan(z), axis=1)]
x.append(np.matrix(z))
j += 3
except:
wrng('\nFile format error during read',rw)
return x
# structured interpolation
def inter_fun(x,y,z,XI,YI,interp_method):
import numpy as np
XI, YI = np.asarray(XI), np.asarray(YI)
x, y = (w.ravel().tolist()[0] for w in [x, y])
if interp_method == 'Spline':
from scipy.interpolate import RectBivariateSpline
x, y = np.sort(np.unique(x)), np.sort(np.unique(y))
if len(x) != len(y): spl = RectBivariateSpline(x,y,z.H)
else: spl = RectBivariateSpline(x,y,z)
imat = spl.ev(XI, YI)
elif interp_method == 'Cubic':
from scipy.interpolate import griddata
z = z.ravel().tolist()[0]
imat = griddata((x, y), z, (XI, YI), method = 'cubic')
elif interp_method == 'Linear':
from scipy.interpolate import griddata
z = z.ravel().tolist()[0]
imat = griddata((x, y), z, (XI, YI), method = 'linear')
return imat
# unstructured interpolation
def inter_fun_un(x,y,z,XI,YI,interp_method):
import numpy as np
XI, YI = np.asarray(XI), np.asarray(YI)
x, y = (w.ravel().tolist()[0] for w in [x, y])
if interp_method == 'Spline':
from scipy.interpolate import SmoothBivariateSpline
spl = SmoothBivariateSpline(x,y,z)
imat = spl.ev(XI, YI)
elif interp_method == 'Cubic':
from scipy.interpolate import Rbf
rbf = Rbf(x, y, z, function='cubic')
imat = rbf(XI, YI)
elif interp_method == 'Linear':
from scipy.interpolate import Rbf
rbf = Rbf(x, y, z, function='linear')
imat = rbf(XI, YI)
return imat
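# Added illustrative sketch (hypothetical scattered samples, not part of the
# original program): interpolate values of a simple plane z = x + y onto a small
# regular grid, using the same calling convention as the rest of this file
# (x and y passed as numpy matrices).
def _example_inter_fun_un():
    import numpy as np
    x = np.matrix(np.random.rand(50))
    y = np.matrix(np.random.rand(50))
    z = np.asarray(x).ravel() + np.asarray(y).ravel()  # exact plane, easy to check
    XI, YI = np.meshgrid(np.linspace(0., 1., 5), np.linspace(0., 1., 5))
    return inter_fun_un(x, y, z, XI, YI, 'Linear')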
# return number of unique values
def unval(x):
import numpy as np
return len(np.unique(x.ravel().tolist()[0]))
# image plot
def plotim(x,y,z,titl):
import matplotlib.pyplot as plt
im = plt.imshow(z, cmap='jet', interpolation='none')
plt.colorbar(im, orientation='horizontal')
plt.title(titl)
titl += '(img).png'
plt.savefig(titl, bbox_inches='tight')
plt.close()
# surface plot
def sqsurfplot(x,y,z,titl):
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import warnings
fig = plt.figure(facecolor="white")
warnings.simplefilter("ignore")
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0.5, antialiased=True)
#ax.zaxis.set_major_locator(LinearLocator(10))
#if np.max(z) < 0.0001:
# ax.zaxis.set_major_formatter(FormatStrFormatter('%.03e'))
ax.set_xlabel('x')
ax.set_ylabel('y')
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.0e'))
#ax.set_zlabel('Head (m)')
fig.colorbar(surf, shrink=0.5, aspect=5)
with warnings.catch_warnings():
plt.title(titl)
plt.tight_layout()
plt.show()
plt.close()
# triangle surface plot
def surfplot(x,y,z,titl):
import matplotlib.pyplot as plt
import numpy as np
import warnings
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
fig = plt.figure()
warnings.simplefilter("ignore")
ax = fig.gca(projection='3d')
    ax.plot_trisurf(x, y, z, cmap=plt.cm.jet, linewidth=0.2)
with warnings.catch_warnings():
plt.title(titl)
plt.tight_layout()
plt.show()
plt.close()
# scatter plot
def scatplot(x,y,z,titl):
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import warnings
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
fig = plt.figure(facecolor="white")
warnings.simplefilter("ignore")
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, c=z, cmap=mpl.cm.gray)
ax.set_xlabel('x')
ax.set_ylabel('y')
with warnings.catch_warnings():
plt.title(titl)
plt.tight_layout()
plt.show()
plt.close()
# trisurface plot
def trisurfplot(x,y,z,titl):
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
from matplotlib import cm
from matplotlib.tri import Triangulation
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
triang = tri.Triangulation(x, y)
plt.figure()
plt.gca().set_aspect('equal')
plt.tripcolor(triang, z, shading='flat', cmap=plt.cm.rainbow,
edgecolors='k')
plt.colorbar()
plt.title(titl)
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig(titl+'(tri).png', bbox_inches='tight')
plt.close()
# interpolated trisurface plot
def triconplot(x,y,z,titl):
import matplotlib.pyplot as plt
import numpy as np
import warnings
from matplotlib import cm
from matplotlib.tri import Triangulation
from matplotlib.tri import UniformTriRefiner
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
triang = Triangulation(x, y)
refiner = UniformTriRefiner(triang)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tri_refi, z_test_refi = refiner.refine_field(z, subdiv=3)
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, lw=0.5, color='black')
levels = np.arange(min(z), max(z), (max(z)-min(z))/100.)
cmap = cm.get_cmap(name='jet', lut=None)
plt.tricontourf(tri_refi, z_test_refi, levels=levels, cmap=cmap)
plt.colorbar()
plt.title(titl)
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig(titl+'(interp).png', bbox_inches='tight')
plt.close()
# 2D scatter plot
def scat2d(x,y,z,titl):
import matplotlib.pyplot as plt
import numpy as np
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
plt.scatter(x, y)
plt.show()
# 2D contour plot (structured)
def cont2d(x,y,z,titl):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import warnings
from scipy.interpolate import griddata
XI, YI = np.linspace(x.min(),x.max(),100),np.linspace(y.min(),y.max(),100)
XI, YI = np.meshgrid(XI, YI)
x,y,z = (np.squeeze(np.asarray(np.reshape(w,(w.size,1))))
for w in [x,y,z])
warnings.simplefilter("ignore")
mpl.rcParams['xtick.direction'] = 'out'
mpl.rcParams['ytick.direction'] = 'out'
with warnings.catch_warnings():
imat = griddata( (x, y), z, (XI, YI), method = 'cubic')
CS = plt.contour(XI, YI, imat)
plt.clabel(CS, inline=1, fontsize=10)
plt.title(titl)
plt.show()
# calculate convergence across entire mesh
def matcon(errc,errf,rc,rf):
import numpy as np
con = np.matrix([[calc_con(errc[i,j],errf[i,j],rc,rf)
for j in range(errc.shape[1]) ] for i in range(errc.shape[0])])
return con
#check number of elements in mesh
class ArrayLengthError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def arrdimch(ny,nx,x,det):
mul = ny*nx
al = len(x)
if mul != al:
eo = '%(det)s Mesh: Number of unique x and y values ' \
'(%(nx)s, %(ny)s) do not multiply to the array length of the ' \
'input. \n (%(mul)s /= %(al)s)!' % locals()
raise ArrayLengthError(eo)
# cut values outside of analysis area
def excut(x,y,p,xmin,xmax,ymin,ymax):
import numpy as np
exc = np.where((x<xmin) | (x>xmax) | (y<ymin) | (y>ymax))
exc = np.unique(np.asarray(exc))
x,y,p = (np.delete(w,exc,0) for w in [x,y,p])
return x,y,p
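# Added illustrative sketch (toy column vectors, not part of the original
# program): keep only the rows whose x and y coordinates fall inside the
# analysis window [1, 3] x [1, 3].
def _example_excut():
    import numpy as np
    x = np.matrix(np.arange(5.)).T
    y = np.matrix(np.arange(5.)).T
    p = np.matrix(np.arange(5.)).T
    return excut(x, y, p, 1., 3., 1., 3.)  # rows with x,y in {1,2,3} remain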
################
# Main Program #
################
def Validation(pd2, pd1): # Validation Calculations
global var4_1
import numpy as np
from numpy import linalg as LA
i = 0
x = odat(fname)
num = x[0] # 1st tab: numerical data
benchmark = x[1] # 2nd tab: benchmark data
eo = 'Validation data is loaded: %s' % (fname,)
i += 1
# text format tags
txt.tag_config("n", background="#e7efff", foreground="red")
txt.tag_config("a", foreground="black")
txt.tag_config("w", background="#ffffff", foreground="red")
label = tk.Label(root, text="T")
font = tkFont.Font(font=label['font'])
txt.tag_config("b", foreground="black",
font=(font.actual()['family'],
'-'+str(font.actual()['size']),"bold"))
txt.tag_raise("sel")
txt.insert(str(i)+'.0',eo+'\n', ("n", "a"))
txt.mark_set(tk.INSERT,str(i)+'.0')
if np.asarray(num).shape[1] > 1:
num,benchmark = (np.asarray(w).ravel() for w in [num,benchmark])
# full
i += 1
txt.insert(str(i)+'.0','***Full***\n', ("w", "b"))
eo = statistics_validation(num,benchmark)
i += 1
txt.insert(str(i)+'.0',eo)
if var4_1.get() == 'Yes':
def ctwt(url,num,benchmark,ndat,fmt): # calc and write sections
url = np.unique(np.asarray(url))
if (url.shape[0] >= 0) & (url.shape[0] < len(num)):
eo = '%s of %s data points used' % (len(num)-url.shape[0],
len(num))
if fmt:
txt.insert(tk.END,eo,fmt)
else:
txt.insert(tk.END,eo)
urb,urn = (np.delete(w,url) for w in [benchmark,num])
eo = statistics_validation(urn,urb)
if fmt:
txt.insert(tk.END,'\n'+eo,fmt)
else:
txt.insert(tk.END,'\n'+eo)
else:
if fmt:
txt.insert(tk.END,ndat+'\n',fmt)
else:
txt.insert(tk.END,ndat+'\n')
brk = 16*'-'+'\n'
# middle
txt.insert(tk.END,'\n')
txt.insert(tk.END,'***Middle***\n', ("n", "b"))
rng = max(max(benchmark),max(num)) - min(min(benchmark),min(num))
urc = min(min(benchmark),min(num)) + pd1*rng
lrc = min(min(benchmark),min(num)) + pd2*rng
url = np.where((benchmark < urc) | (benchmark > lrc))
ndat = 'There are no data in this range to analyze'
ctwt(url,num,benchmark,ndat,("n", "a"))
# upper
txt.insert(tk.END,'\n',("n", "a"))
txt.insert(tk.END,'***Upper***\n', ("w", "b"))
lrc = min(min(benchmark),min(num)) + pd2*rng
url = np.where(benchmark <= lrc)
ctwt(url,num,benchmark,ndat,False)
# lower
txt.insert(tk.END,'\n')
txt.insert(tk.END,'***Lower***\n', ("n", "b"))
urc = min(min(benchmark),min(num)) + pd1*rng
url = np.where(benchmark >= urc)
ctwt(url,num,benchmark,ndat,("n", "a"))
txt.insert(tk.END,'\n',("n", "a"))
def ValidationPlt(): # Validation Plots
global Obwc,Obmc,Obvc
import matplotlib.pyplot as plt
import numpy as np
x = odat(fname)
num = x[0]# 1st tab: numerical data
benchmark = x[1] # 2nd tab: benchmark data
if np.asarray(num).shape[1] > 1:
num,benchmark= (np.asarray(w).ravel() for w in [num,benchmark])
with plt.style.context(('grayscale')):
# plot Numerical and Benchmark data
if Obwc.get():
fig, ax = plt.subplots()
ax.plot(num, label='Numerical')
ax.plot(benchmark, label='Benchmark')
plt.xlabel('Data Points')
plt.ylabel('State')
ax.set_xlim((0, len(num)))
ax.set_ylim((min(min(num),min(benchmark)),max(max(num),
max(benchmark))))
plt.legend(prop={'size':12})
#plt.legend()
plt.show()
        # plot Benchmark versus Numerical
if Obvc.get():
fig, ax = plt.subplots()
ax.plot([0,max(max(num),max(benchmark))],[0,max(max(num),
max(benchmark))],'r')
ax.scatter(benchmark,num, s=10)
ax.set_xlim((min(benchmark), max(benchmark)))
ax.set_ylim((min(num),max(num)))
plt.xlabel('Benchmark')
plt.ylabel('Numerical')
plt.show()
# plot Benchmark minus Numerical data
if Obmc.get():
fig, ax = plt.subplots()
ax.plot(benchmark-num, '#0072B2', label='Benchmark - Numerical')
plt.xlabel('Data Points')
plt.ylabel('State Difference')
ax.set_xlim((0, len(num)))
ax.set_ylim((min(benchmark-num),max(benchmark-num)))
plt.legend(prop={'size':12})
#plt.legend()
plt.show()
def Verification(interp_method,log_flag,runopt,*args): # Verification
global imat,book,Surf,Scat,Cont,Imag,popt,varc1,varc2,varc3,varc4,specifar
import numpy as np
import math
import xlwt
from numpy import linalg as LA
if 'figures' in log_flag:
popt = [0]*len(popt)
if Cont.get(): popt[7] = 1
if Surf.get(): popt[5] = 1
if Scat.get(): popt[4] = 1
if Imag.get(): popt[3] = 1
# text format tags
txt.tag_config("n", background="#e7efff", foreground="red")
txt.tag_config("a", foreground="black")
txt.tag_config("w", background="#ffffff", foreground="red")
label = tk.Label(root, text="T")
font = tkFont.Font(font=label['font'])
txt.tag_config("b", foreground="black",
font=(font.actual()['family'],
'-'+str(font.actual()['size']),"bold"))
txt.tag_raise("sel")
eo = 'Interpolation method is: %(interp_method)s \n' \
'The method for the results is: %(log_flag)s' % locals()
txt.insert('1.0',eo+'\n',("n", "a"))
txt.mark_set(tk.INSERT,'1.0')
if 'Uncertainty' in runopt:
if ('95' in args[0]) & ('Str' in args[0]): fs = 1.25
elif ('95' in args[0]) & ('Uns' in args[0]): fs = 3.0
elif ('99' in args[0]) & ('Str' in args[0]): fs = 1.65
elif ('99' in args[0]) & ('Uns' in args[0]): fs = 4.0
eo = 'Factor of safety: %s \n' % (fs,)
txt.insert(tk.END,eo,("n", "a"))
# load data
x = odat(fname)
eo = 'Data is loaded: %s \n' % (fname,)
txt.insert(tk.END,eo)
# sort mesh qualities
smq = [(i, x[i].shape[0]) for i in range(3)]
smq = sorted(smq, key=lambda shape: shape[1])
coarse,mid,fine = [x[smq[i][0]] for i in range(3)]
# manage data
xc,zc,pc = (coarse[:,w] for w in range(3))
xm,zm,pm = (mid[:,w] for w in range(3))
xf,zf,pf = (fine[:,w] for w in range(3))
# record exact solution
if 'Code' in runopt:
pce, pme, pfe = (w[:,3] for w in [coarse,mid,fine])
pc,pm,pf = pc-pce, pm-pme, pf-pfe
hc,hf = (np.sqrt(1. / len(w)) for w in [xc,xf])
# convert integer values to floats
def conf(*args):
for w in args:
yield w.astype('float')
xc,zc,pc = conf(xc,zc,pc)
xm,zm,pm = conf(xm,zm,pm)
xf,zf,pf = conf(xf,zf,pf)
    # exclude values outside of user-specified ranges
if specifar.get():
xc,zc,pc = excut(xc,zc,pc,varc1.get(),varc2.get(),varc3.get(),
varc4.get())
def vspa(x):
x = np.unique(x.ravel().tolist()[0])
x = abs(x[1] - x[0])
return x
xsp,zsp = vspa(xm),vspa(zm)
xm,zm,pm = excut(xm,zm,pm,varc1.get()-xsp,varc2.get()+xsp,
varc3.get()-zsp,varc4.get()+zsp)
xsp,zsp = vspa(xf),vspa(zf)
xf,zf,pf = excut(xf,zf,pf,varc1.get()-xsp,varc2.get()+xsp,
varc3.get()-zsp,varc4.get()+zsp)
    # determine number of unique values
num_c_x,num_c_y,num_m_x,num_m_y,num_f_x,num_f_y \
= (unval(w) for w in [xc,zc,xm,zm,xf,zf])
# check if unstructured
ustruc = False
if (num_c_x * num_c_y > len(xc) and num_m_x * num_m_y > len(xm) and
num_f_x * num_f_y > len(xf)):
ustruc = True
eo = 'Unstructured Input Mesh'
txt.insert(tk.END,'\n'+eo,("n", "a"))
# don't plot image
popt[3] = 0
else:
eo = 'Structured Input Mesh'
txt.insert(tk.END,eo+'\n',("n", "a"))
if ustruc:
rf = (float(num_m_x)/float(num_c_x))**(1./2.)
rc = (float(num_f_x)/float(num_m_x))**(1./2.)
else: # refinement ratios by mesh spacing
# calculate rc
x,y = (np.unique(w.ravel().tolist()[0]) for w in [xc,xm])
rc = abs(x[1]-x[0])/abs(y[1]-y[0])
x,y = (np.unique(w.ravel().tolist()[0]) for w in [zc,zm])
rc = (rc*abs(x[1]-x[0])/abs(y[1]-y[0]))**0.5
# calculate rf
x,y = (np.unique(w.ravel().tolist()[0]) for w in [xm,xf])
rf = abs(x[1]-x[0])/abs(y[1]-y[0])
x,y = (np.unique(w.ravel().tolist()[0]) for w in [zm,zf])
rf = (rf*abs(x[1]-x[0])/abs(y[1]-y[0]))**0.5
# refinement ratios by numbers of nodes in meshes
#rf = math.sqrt(float(num_f_x)*float(num_f_y)/float(num_m_x)/ \
#float(num_m_y))
#rc = math.sqrt(float(num_m_x)*float(num_m_y)/float(num_c_x)/ \
#float(num_c_y))
eo = 'rc = %(rc)s\nrf = %(rf)s' % locals()
txt.insert(tk.END,eo+'\n')
# this limit (4/3) is suggested by ASME V&V 20-2009, Roache and Ghia
if rf<4./3. or rc<4./3.:
        eo = 'Refinement ratio must be greater than 4/3! \n' \
'rc =%(rc)0.3f\n' \
'rf =%(rf)0.3f' % locals()
txt.insert(tk.END,eo+'\n')
    if not ustruc: # arrange into matrices
# coarse mesh
arrdimch(num_c_y, num_c_x, xc,'Coarse')
xc,zc,pc = (np.reshape(w,(num_c_y,num_c_x)) for w in [xc,zc,pc])
# medium mesh
arrdimch(num_m_y, num_m_x, xm,'Medium')
xm,zm,pm = (np.reshape(w,(num_m_y,num_m_x)) for w in [xm,zm,pm])
# fine mesh
arrdimch(num_f_y, num_f_x, xf,'Fine')
xf,zf,pf = (np.reshape(w,(num_f_y,num_f_x)) for w in [xf,zf,pf])
if 'figures' in log_flag:
plotmain(popt,xc,zc,pc,'Coarse mesh')
plotmain(popt,xm,zm,pm,'Medium mesh')
plotmain(popt,xf,zf,pf,'Fine mesh')
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet("Coarse mesh")
for i in range(xc.shape[0]):
for j in range(xc.shape[1]):
sheet1.write(i, j, pc[i,j])
XI, YI = np.asarray(xc), np.asarray(zc)
#interpolate refined calculations to locations of coarse mesh
if ustruc:
pmc = inter_fun_un(xm,zm,pm,XI,YI,interp_method)
if runopt == 'Code':
pfc = inter_fun_un(xf,zf,pf,xm,zm,interp_method)
else:
pfc = inter_fun_un(xf,zf,pf,XI,YI,interp_method)
else:
pmc = inter_fun(xm,zm,pm,XI,YI,interp_method)
if runopt == 'Code':
pfc = inter_fun(xf,zf,pf,xm,zm,interp_method)
else:
pfc = inter_fun(xf,zf,pf,XI,YI,interp_method)
if 'figures' in log_flag:
ptx = 'Interpolations Corresponding to'
plotmain(popt,xc,zc,pmc,'Mid %s Coarse' % (ptx,))
if runopt == 'Code':
plotmain(popt,xm,zm,pfc,'Fine %s Mid' % (ptx,))
else:
plotmain(popt,xc,zc,pfc,'Fine %s Coarse' % (ptx,))
sheet2 = book.add_sheet("Interpolated Mid")
sheet3 = book.add_sheet("Interpolated Fine")
for i in range(pmc.shape[0]):
for j in range(pmc.shape[1]):
sheet2.write(i, j, pmc[i,j])
for i in range(pfc.shape[0]):
for j in range(pfc.shape[1]):
sheet3.write(i, j, pfc[i,j])
if ('Solution' in runopt) | ('Uncertainty' in runopt):
# global norms
errc = pmc - pc
errf = pfc - pmc
if 'figures' in log_flag:
plotmain(popt,xc,zc,errc,'Difference Between Mid and Coarse')
plotmain(popt,xc,zc,errf,'Difference Between Fine and Mid')
sheet4 = book.add_sheet("Mid - Coarse")
sheet5 = book.add_sheet("Fine - Mid")
for i in range(errc.shape[0]):
for j in range(errc.shape[1]):
sheet4.write(i, j, errc[i,j])
sheet5.write(i, j, errf[i,j])
        # calculate convergence across entire mesh
        if ustruc:
            con_p = np.array([calc_con(errc[i], errf[i], rc, rf)
                              for i in range(xc.shape[0])])
else:
if rf == rc:
con_p = np.log(abs(errf)/abs(errc))/np.log(rf)
con_p = abs(con_p)
NanLoc = np.isnan(con_p)
con_p[NanLoc] = 0. # force nans to zero
else:
con_p = matcon(errc,errf,rc,rf)
if 'Solution' in runopt:
if 'figures' in log_flag:
# plot values equal to and below 4
ccon_p = con_p.copy()
ccon_p[ccon_p > 4] = 4
plotmain(popt,xc,zc,np.array(ccon_p),'Order of Accuracy')
sheet6 = book.add_sheet('Order of Accuracy')
for i in range(con_p.shape[0]):
for j in range(con_p.shape[1]):
sheet6.write(i, j, con_p[i,j])
# reduce extreme convergence values to 10.
con_p[con_p > 10.] = 10.
if not ustruc:
# calculate relative error
if runopt == 'Code':
p_exact = (np.multiply(np.power(rf,con_p),pfc) - pmc) / \
(np.power(rf,con_p) - 1.)
relative_err = abs((p_exact - pfc) / (p_exact + 1e-12))
if 'figures' in log_flag:
plotmain(popt,xc,zc,np.array(relative_err),'Relative Error')
sheet6 = book.add_sheet("Relative Error")
for i in range(relative_err.shape[0]):
for j in range(relative_err.shape[1]):
sheet6.write(i, j, relative_err[i,j])
# calculate GCI
if 'Uncertainty' in runopt:
Gcon_p = con_p.copy()
Gcon_p[Gcon_p < 1.] = 1. # assume order of accuracy of at least one
GCI = np.matrix([[abs(GCI_fine(pfc[i,j],pmc[i,j],rf,Gcon_p[i,j],fs)[0])
for j in range(pmc.shape[1])
]for i in range(pmc.shape[0])])
p_up = np.multiply(pfc,(1. + GCI))
p_down = np.multiply(pfc,(1. - GCI))
gci_error = np.multiply(pfc,GCI)
sheet7 = book.add_sheet("Upper band error")
sheet8 = book.add_sheet("Lower band error")
sheet9 = book.add_sheet("Order of Accuracy")
for i in range(p_up.shape[0]):
for j in range(p_up.shape[1]):
sheet7.write(i, j, p_up[i,j])
sheet8.write(i, j, p_down[i,j])
sheet9.write(i, j, con_p[i,j])
if 'figures' in log_flag:
plotmain(popt,xc,zc,gci_error,
'Absolute error based on Roache GCI')
plotmain(popt,xc,zc,p_up,
'Upper band of calculation error based on Roache GCI')
plotmain(popt,xc,zc,p_down,
'Lower band of calculation error based on Roache GCI')
else:
pass
if 'Solution' in runopt:
sheet6 = book.add_sheet("Order of Accuracy")
for i in range(con_p.shape[0]):
for j in range(con_p.shape[1]):
sheet6.write(i, j, con_p[i,j])
# norms global
def verot(nm,fmt,L1c,L2c,Linfc,L1f,L2f,Linff):
txt.insert(tk.END,32*'='+' \n',(fmt, "a"))
txt.insert(tk.END,nm + ' Domain Norms \n',(fmt, "b"))
eo = 'L1P1 = %(L1c)E \n' \
'L2P1 = %(L2c)E \n' \
'LinfP1 = %(Linfc)E \n' \
'L1P2 = %(L1f)E \n' \
'L2P2 = %(L2f)E \n' \
'LinfP2 = %(Linff)E' % locals()
txt.insert(tk.END,eo+'\n',(fmt, "a"))
def conot(nm,L1_con,L2_con,L_inf_con,*args):
eo = '%(nm)sL1 convergence: %(L1_con)0.4f \n' \
'%(nm)sL2 convergence: %(L2_con)0.4f \n' \
'%(nm)sL_inf convergence: %(L_inf_con)0.4f' % locals()
try:
eo += '\n' + nm + 'Median convergence: '+ str('%0.4f' %args[0]) \
+ '\n' + nm + 'Average convergence: '+ str('%0.4f' %args[1])
except:
pass
return eo
def domcals(nm,fmt,L1c,L2c,Linfc,L1f,L2f,Linff,med_con,ave_con):
if 'Long log' in log_flag:
# OP norms
if ('Solution' in runopt) | ('Uncertainty' in runopt):
txt.insert(tk.END,32*'='+' \n',(fmt, "a"))
txt.insert(tk.END,nm + ' Domain Norms \n',(fmt, "b"))
eo = 'L1c = %(L1c)0.4e \n' \
'L2c = %(L2c)0.4e \n' \
'Linfc = %(Linfc)0.4e \n' \
'L1f = %(L1f)0.4e \n' \
'L2f = %(L2f)0.4e \n' \
'Linff = %(Linff)0.4e' % locals()
txt.insert(tk.END,eo+'\n',(fmt, "a"))
if 'Solution' in runopt:
# global convergence
L1_con = calc_con(L1c,L1f,rc,rf)
L2_con = calc_con(L2c,L2f,rc,rf)
L_inf_con = calc_con(Linfc,Linff,rc,rf)
txt.insert(tk.END,24*'-'+' \n',(fmt, "a"))
txt.insert(tk.END,'Order of Accuracy \n',(fmt, "b"))
eo = conot('',L1_con,L2_con,L_inf_con,med_con,ave_con)
txt.insert(tk.END,eo+'\n',(fmt, "a"))
def vdomcals(nm,fmt,L1_ec,L2_ec,Linf_ec,L1c,L2c,Linfc,L1f,L2f,Linff):
if 'Long log' in log_flag:
# OP norms
eo = verot(nm,fmt,L1c,L2c,Linfc,L1f,L2f,Linff)
# global convergence
def obcon(mt,L1_con,L2_con,L_inf_con):
txt.insert(tk.END,24*'-'+' \n',(fmt, "a"))
txt.insert(tk.END,'Order of Accuracy \n',(fmt, "b"))
txt.insert(tk.END,conot(mt,L1_con,L2_con,L_inf_con)+'\n',
(fmt, "a"))
L1_con,L2_con,L_inf_con = \
(np.log(w)/np.log(rf) for w in [L1c/L1f,L2c/L2f,Linfc/Linff])
obcon('P1 ',L1_con,L2_con,L_inf_con)
L1_con,L2_con,L_inf_con = \
(np.log(w)/np.log(rc) for w in [L1_ec/L1c,L2_ec/L2c,Linf_ec/Linfc])
obcon('P2 ',L1_con,L2_con,L_inf_con)
if ('Solution' in runopt) | ('Uncertainty' in runopt):
# Global Domain
L1c,L2c,Linfc = (LA.norm(np.asarray(errc.ravel())[0],w)
for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(errf.ravel(),w) for w in [1,2,np.inf])
med_con = np.median(np.asarray(con_p.ravel()))
ave_con = np.average(np.asarray(con_p.ravel()))
domcals('Global',"n",L1c,L2c,Linfc,L1f,L2f,Linff,med_con,ave_con)
# Inner Domain
if not ustruc:
L1c,L2c,Linfc = (LA.norm(np.asarray(errc[1:-1,1:-1].ravel())[0],w)
for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(errf[1:-1,1:-1].ravel(),w)
for w in [1,2,np.inf])
med_con = np.median(np.asarray(con_p[1:-1,1:-1].ravel()))
ave_con = np.average(np.asarray(con_p[1:-1,1:-1].ravel()))
domcals('Inner',"a",L1c,L2c,Linfc,L1f,L2f,Linff,med_con,ave_con)
# Boundary Domain
errc[1:-1,1:-1] = 0.
errf[1:-1,1:-1] = 0.
con_p[1:-1,1:-1] = 999.
icon = np.sort(np.asarray(con_p.ravel()))
L1c,L2c,Linfc = (LA.norm(np.asarray(errc.ravel())[0],w)
for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(errf.ravel(),w) for w in [1,2,np.inf])
icon = icon.ravel()
med_con = np.median(icon[0:np.max(np.where(icon<999.))].ravel())
ave_con = np.average(icon[0:np.max(np.where(icon<999.))].ravel())
domcals('Boundary',"n",L1c,L2c,Linfc,L1f,L2f,Linff,med_con,ave_con)
else:
# Global Domain
L1_ec,L2_ec,Linf_ec = (LA.norm(np.asarray(pc.ravel())[0],w)
for w in [1,2,np.inf])
L1c,L2c,Linfc = (LA.norm(pmc.ravel(),w) for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(pfc.ravel(),w) for w in [1,2,np.inf])
vdomcals('Global',"n",L1_ec,L2_ec,Linf_ec,L1c,L2c,Linfc,L1f,L2f,Linff)
# Inner Domain
if not ustruc:
L1_ec,L2_ec,Linf_ec = \
(LA.norm(np.asarray(pc[1:-1,1:-1].ravel())[0],w)
for w in [1,2,np.inf])
L1c,L2c,Linfc = (LA.norm(pmc[1:-1,1:-1].ravel(),w)
for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(pfc[1:-1,1:-1].ravel(),w)
for w in [1,2,np.inf])
vdomcals('Inner',"a",L1_ec,L2_ec,Linf_ec,L1c,L2c,Linfc,L1f,L2f,
Linff)
# Boundary Domain
pc[1:-1,1:-1] = 0.
pmc[1:-1,1:-1] = 0.
pfc[1:-1,1:-1] = 0.
L1_ec,L2_ec,Linf_ec = (LA.norm(np.asarray(pc.ravel())[0],w)
for w in [1,2,np.inf])
L1c,L2c,Linfc = (LA.norm(pmc.ravel(),w) for w in [1,2,np.inf])
L1f,L2f,Linff = (LA.norm(pfc.ravel(),w) for w in [1,2,np.inf])
vdomcals('Boundary',"n",L1_ec,L2_ec,Linf_ec,L1c,L2c,Linfc,L1f,L2f,
Linff)
#######################
# Graphical Interface #
#######################
import Tkinter as tk
import ttk
import tkFont
def rmrw(self,r):
for label in self.grid_slaves():
if int(label.grid_info()['row']) > r:
label.destroy()
return self
def wrng(wrn,pl):
rmrw(root,pl)
butttons(root,pl+1,'nm')
txt = tk.Text(relief=tk.SUNKEN)
txt.config(width=30,height=3,background="#dd4e4c")
txt.grid(row=pl,column=0,columnspan=2,sticky=tk.W+tk.E+tk.N+tk.S,padx=5)
txt.tag_config("r", background="#dd4e4c", foreground="#dd4e4c")
txt.tag_config("e", foreground="black", justify="center")
txt.insert('1.0',wrn, ("r", "e"))
def addmenu():
global rw,var1, var2, var3, var4, tools, Tmen
rmrw(root,rw-1)
if 'Uncertainty' in var1.get():
rw += 4
# GCI Confidence Level
w4 = ttk.Label(root, text="GCI Confidence Level:")
w4.grid(row=5,column=0,sticky=tk.E,padx=5)
var4 = tk.StringVar()
opt4 = ttk.Combobox(root,textvariable=var4,state='readonly',
foreground='blue',width=32)
opt4['values'] = ('95% [1.25] (Structured Refinement)',
'95% [3.00] (Unstructured Refinement)',
'99% [1.65] (Structured Refinement)',
'99% [4.00] (Unstructured Refinement)')
opt4.grid(row=5,column=1,sticky=tk.W)
var4.set(20*' ')
else:
rw += 2
butttons(root,rw,'nm')
# Interpolation
w2 = ttk.Label(root, text="Interpolation:")
w2.grid(row=rw-2,column=0,sticky=tk.E,padx=5)
var2 = tk.StringVar()
opt2 = ttk.Combobox(root,textvariable=var2,state='readonly',
foreground='blue')
opt2['values'] = ('Linear',
'Spline',
'Cubic')
opt2.grid(row=rw-2,column=1,sticky=tk.W)
var2.set(20*' ')
# Output Options
w3 = ttk.Label(root, text="Output Options:")
w3.grid(row=rw-1,column=0,sticky=tk.E,padx=5)
var3 = tk.StringVar()
opt3 = ttk.Combobox(root,textvariable=var3,state='readonly',
foreground='blue')
opt3['values'] = ('Short log',
'Short log and figures',
'Long log',
'Long log and figures')
opt3.grid(row=rw-1,column=1,sticky=tk.W)
var3.set(20*' ')
def addmenu0_1():
global rw, var3_1, var4_1, spcu
rmrw(root,3)
rw += 2
butttons(root,rw,'nm')
# Generate Plots
var3_1 = tk.StringVar()
rad1 = ttk.Frame(root)
rad1.grid(row=4,column=1,sticky=tk.W)
opt3 = ttk.Radiobutton(rad1, text='Yes', variable=var3_1, value='Yes')
opt3.pack(side=tk.LEFT)
opt4 = ttk.Radiobutton(rad1, text='No', variable=var3_1, value='No')
var3_1.set('No')
opt4.pack(side=tk.LEFT)
w3 = ttk.Label(root, text=" Generate Plots:")
w3.grid(row=4,column=0,sticky=tk.E,padx=5)
# Specify Cutoff
var4_1 = tk.StringVar()
rad2 = ttk.Frame(root)
rad2.grid(row=5,column=1,sticky=tk.W)
opt3 = ttk.Radiobutton(rad2, command=addmenu2_1, text='Yes',
variable=var4_1, value='Yes')
opt3.pack(side=tk.LEFT)
opt4 = ttk.Radiobutton(rad2, command=addmenu2_1, text='No',
variable=var4_1, value='No')
var4_1.set('No')
opt4.pack(side=tk.LEFT)
w3 = ttk.Label(root, text="Specify Cutoff:")
w3.grid(row=5,column=0,sticky=tk.E,padx=5)
spcu = False
def addmenu2_1():
global rw, var5_1, var6_1, spcu, var4_1
if not spcu and 'Yes' in var4_1.get() and cont:
spcu = True
rmrw(root,5)
rw += 1
butttons(root,rw,'nm')
var5_1 = tk.StringVar()
w3 = ttk.Label(root, text="Upper Cut (%):")
w3.grid(row=6,column=0,sticky=tk.E,padx=5)
opt3 = ttk.Entry(root,textvariable=var5_1,foreground='blue')
var5_1.set('100')
opt3.grid(row=6,column=1,sticky=tk.W)
for label in root.grid_slaves():
if int(label.grid_info()['row'])>6:
label.grid_forget()
rw += 1
butttons(root,rw,'nm')
var6_1 = tk.StringVar()
w4 = ttk.Label(root, text="Lower Cut (%):")
w4.grid(row=7,column=0,sticky=tk.E,padx=5)
opt4 = ttk.Entry(root,textvariable=var6_1,foreground='blue')
var6_1.set('0')
opt4.grid(row=7,column=1,sticky=tk.W)
if spcu and 'No' in var4_1.get() and cont:
spcu = False
rw += -2
butttons(root,rw,'nm')
def addmenu3():
global rw
try: Verification(var2.get(),var3.get(),var1.get(),var4.get())
except ImportError:
wrng('\nImport Error. Check for missing dependencies',rw)
except ArrayLengthError as e:
wrng(e.value,rw)
except:
wrng('\nFile input error',rw)
else:
rmrw(root,rw-1)
rw += 1
if 'Uncertainty' in var1.get():
ht,wd = 15,55
else:
ht,wd = 25,55
sb = ttk.Scrollbar()
sb.config(command=txt.yview)
txt.config(yscrollcommand=sb.set,width=wd,height=ht)
sb.grid(row=rw-1,column=2,sticky=tk.W+tk.N+tk.S)
txt.grid(row=rw-1,column=0,columnspan=2,sticky=tk.W+tk.E+tk.N+tk.S,
padx=5)
root.resizable(tk.TRUE,tk.TRUE)
butttons(root,rw,'en')
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=1)
root.rowconfigure(rw-1, weight=1)
def addmenu3_1():
global rw,txt,var3_1,var4_1,var5_1,var6_1,Validation,ValidationPlt
if var3_1.get() == 'Yes':
ValidationPlt()
if var4_1.get() == 'Yes':
ht,wd = 20,55
else:
ht,wd = 11,55
sb = ttk.Scrollbar()
txt = tk.Text(relief=tk.SUNKEN)
sb.config(command=txt.yview)
txt.config(yscrollcommand=sb.set,width=wd,height=ht)
try:
if len(var5_1.get()) > 0:
Validation(float(var5_1.get())/100.,float(var6_1.get())/100.)
else:
Validation(0.,0.)
except ImportError:
wrng('\nImport Error. Check for missing dependencies',rw)
except:
wrng('\nFile format error during read',rw)
else:
if var4_1.get() == 'Yes':
rmrw(root,7)
rw += 1
else:
rmrw(root,5)
rw += 3
sb.grid(row=8,column=2,sticky=tk.W+tk.N+tk.S)
txt.grid(row=8,column=0,columnspan=2,sticky=tk.W+tk.E+tk.N+tk.S,padx=5)
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=1)
root.rowconfigure(rw-1, weight=1)
root.resizable(tk.TRUE,tk.TRUE)
butttons(root,rw,'en')
def openfl():
from tkFileDialog import askopenfilename
global fname, rw
rmrw(root,2)
rw += 1
butttons(root,rw,'nm')
w3 = ttk.Label(root, text="Input File:")
w3.grid(row=3,column=0,sticky=tk.E,padx=5)
fltp = ('*.xlsx','*.xls','*.h5','*.csv','*.txt','*.dat')
fname = askopenfilename(filetypes=[('Input',fltp)],
title='Select Run File')
if len(fname) > 0:
w4 = ttk.Label(root, text=fname.split('/')[-1], foreground='blue')
w4.grid(row=3,column=1,sticky=tk.W)
if 'Validation' in var1.get():
addmenu0_1()
else:
addmenu()
else:
wrng('\nAn input file must be selected',rw)
def svtext():# write output to text file
from tkFileDialog import asksaveasfilename
fname = asksaveasfilename(filetypes=[('Text',('*.txt'))],
title='Text Output Save As')
if fname:
with open(fname,'w') as file:
file.write(txt.get(1.0,tk.END))
def svmesh():# write meshes to spreadsheet file
from tkFileDialog import asksaveasfilename
fname = asksaveasfilename(filetypes=[('Workbook',('*.xls'))],
title='Mesh Output Save As')
if fname:
book.save(fname)
def about():# VAVUQ short description
abo = tk.Toplevel(root,background='white')
abo.title('About VAVUQ')
mtxt = 'VAVUQ'
msg = ttk.Label(abo,text=mtxt,justify='center',font='TkCaptionFont',
background='white')
msg.pack()
mtxt = 'Version: 2.4'
msg = ttk.Label(abo,text=mtxt,justify='center',font='TkHeadingFont',
background='white')
msg.pack()
mtxt = """
VAVUQ (Verification And Validation and Uncertainty Quantification) can
be used as a general purpose program for verification, validation, and
uncertainty quantification. The motivation for the creation of and
continued development of the program is to provide a cost effective and
easy way to assess the quality of computational approximations. The hope
is that the methods used in this program can be applied to fields such
as civil engineering where they are currently often underutilized. This
code was created through efforts from Bombardelli's group and Dr. Bill
Fleenor at the University of California Davis. The creators belong to
the Civil & Environmental Engineering (CEE) Department, the Center for
Watershed Sciences (CWS), and the Delta Solution Team (DST). The main
code development is headed by Kaveh Zamani and James E. Courtney.
"""
msg = ttk.Label(abo,text=mtxt,background='white')
#msg.config(bg='white',font=('times',12,'italic'))
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
abo.geometry("+%d+%d" % (x, y))
msg.pack()
def state():
global var1,var2,var3,var3_1,var5_1,var6_1,clr,rw,txt
for label in root.grid_slaves():
if (int(label.grid_info()['row'])<rw and
int(label.grid_info()['column']) == 1):
try:
label['state'] = 'disabled'
except:
pass
if clr: # Clear event
clr = False
openfl()
else:
if 'Validation' in var1.get():
if not fname:
openfl()
elif len(var3_1.get()) == 0:
addmenu0_1()
elif 'Yes' in var4_1.get():
if len(var5_1.get()) > 0:
                    if float(var5_1.get()) > float(var6_1.get()):
butttons(root,rw,'lb')
root.after(10,addmenu3_1)
else:
wrng('\nUpper cut must be greater than the lower',8)
else:
addmenu2_1()
else:
rmrw(root,rw)
butttons(root,rw,'lb')
root.after(10,addmenu3_1)
else: # Verification
if not fname:
openfl()
else:
txt = tk.Text(relief=tk.SUNKEN)
rmrw(root,rw)
butttons(root,rw,'lb')
root.after(10,addmenu3)
def stpr():
root.destroy()
def clear():
global clr,var2,var3,var3_1,var4_1,var5_1,var6_1,rw,cont
opt1['state'] = 'readonly'
clr = True
rmrw(root,2)
var2,var3,var3_1,var4_1,var5_1,var6_1 = (tk.StringVar() for _ in xrange(6))
root.resizable(tk.FALSE, tk.FALSE)
butttons(root,3,'nm')
rw = 3
for i in range(7):
root.rowconfigure(i, weight=0)
root.winfo_toplevel().wm_geometry("")
cont = True
if specifar:
specifar.set(0)
for i in range(10):
root.columnconfigure(i, weight=0)
root.rowconfigure(i, weight=0)
def szgr(self,rw):
self.sz = ttk.Sizegrip()
self.sz.grid(row=rw,column=2,sticky=tk.SE)
def butttons(self,rw,o):
global cont
style = ttk.Style()
style.configure("E.TButton", width = 7)
style.map("E.TButton",
foreground=[('!active', 'black'), ('pressed', 'red'),
('active', 'blue')],
background=[('!active', '#71ee6d'),('pressed', 'disabled', 'black'),
('active', '#71ee6d')]
)
style.configure("C.TButton", width = 7)
style.map("C.TButton",
foreground=[('!active', 'black'),('pressed', 'red'),
('active', 'blue')],
background=[('!active', '#b8fff3'),('pressed', 'disabled', 'black'),
('active', '#b8fff3')]
)
style.configure("X.TButton", width = 7)
style.map("X.TButton",
foreground=[('!active', 'black'),('pressed', 'red'),
('active', 'blue')],
background=[('!active', '#e2664c'),('pressed', 'disabled', 'black'),
('active', '#e2664c')]
)
rmrw(self,rw-1)
ttk.Button(self, command=clear, text='Clear',
style="C.TButton").grid(row=rw,column=1,sticky=tk.W,padx=5,
pady=5)
ttk.Button(self, command=stpr, text='Exit',
style="X.TButton").grid(row=rw,column=0,sticky=tk.E)
if o == 'lb':
ttk.Label(self,
text="Processing...").grid(row=rw,column=1,sticky=tk.E,
padx=5)
elif o == 'nm':
ebutton = ttk.Button(self, command=state, text='Enter',
style="E.TButton")
ebutton.grid(row=rw,column=1,sticky=tk.E,padx=5)
elif o == 'en':
ttk.Button(self, command=state, text='Enter',
state=tk.DISABLED).grid(row=rw,column=1,sticky=tk.E,padx=5)
szgr(self,rw)
cont = False
def bug():
def cbug(event):
import webbrowser
import warnings
with warnings.catch_warnings():
site = 'https://github.com/VAVUQ/VAVUQ'
webbrowser.open_new(site)
abo.destroy()
abo = tk.Toplevel(root)
abo.title('Contribute/Report Bug...')
mtxt = 'Please report any bugs or contribute to VAVUQ by visiting the' \
' following repository:'
msg = tk.Message(abo,text=mtxt, width=250)
msg.config(font=('times',12))
link = ttk.Label(abo, text="VAVUQ Repository",foreground="blue",
font=('times',12,'italic'), cursor="hand2")
msg.pack()
link.pack()
link.bind("<Button-1>", cbug)
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
abo.geometry("+%d+%d" % (x, y))
def doc():
def cdoc(event):
import webbrowser
import warnings
with warnings.catch_warnings():
site = 'http://vavuq.org'
webbrowser.open_new(site)
abo.destroy()
abo = tk.Toplevel(root)
abo.title('Documentation')
mtxt = 'Visit the following web page for the VAVUQ documentation:'
msg = tk.Message(abo,text=mtxt, width=250)
msg.config(font=('times',12))
link = ttk.Label(abo, text="Vavuq.org",foreground="blue",
font=('times',12,'italic'), cursor="hand2")
msg.pack()
link.pack()
link.bind("<Button-1>", cdoc)
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
abo.geometry("+%d+%d" % (x, y))
def copt():
global rw,varc1,varc2,varc3,varc4
if specifar.get() and cont:
rmrw(root,rw-1)
varc1,varc2,varc3,varc4 = (tk.DoubleVar() for _ in xrange(4))
opt = ttk.Label(root, text="X min | X max:")
opt.grid(row=rw,column=0,sticky=tk.E,padx=5)
cent = ttk.Frame(root)
cent.grid(row=rw,column=1,sticky=tk.W)
op1 = ttk.Entry(cent,textvariable=varc1,width=11,foreground='blue')
op1.pack(side=tk.LEFT)
op2 = ttk.Entry(cent,textvariable=varc2,width=11,foreground='blue')
op2.pack(side=tk.LEFT)
opt = ttk.Label(root, text="Y min | Y max:")
opt.grid(row=rw+1,column=0,sticky=tk.E,padx=5)
cent2 = ttk.Frame(root)
cent2.grid(row=rw+1,column=1,sticky=tk.W)
op3 = ttk.Entry(cent2,textvariable=varc3,width=11,foreground='blue')
op3.pack(side=tk.LEFT)
op4 = ttk.Entry(cent2,textvariable=varc4,width=11,foreground='blue')
op4.pack(side=tk.LEFT)
rw += 2
butttons(root,rw,'nm')
elif cont:
rmrw(root,rw-3)
rw += -2
butttons(root,rw,'nm')
global cont
cont = True
root = tk.Tk()
root.title("VAVUQ")
menubar = ttk.Frame(root)
menubar.grid(row=0,column=0,columnspan=2,sticky=tk.W)
Fmen = tk.Menubutton(menubar, text='File', underline=0)
Fmen.pack(side=tk.LEFT)
fle = tk.Menu(Fmen,tearoff=0)
fle.add_command(command=clear,label='Clear', underline=0)
fle.add_separator()
fle.add_command(command=svtext, label='Save Output Text As..', underline=12)
fle.add_command(command=svmesh, label='Save Output Meshes As..', underline=12)
fle.add_separator()
fle.add_command(command=stpr, label='Quit', underline=0)
Fmen.config(menu=fle)
Pmen = tk.Menubutton(menubar, text='Plotting', underline=0)
Pmen.pack(side=tk.LEFT)
plm = tk.Menu(Pmen, tearoff=0)
Pmen.config(menu=plm)
#Verification plotting submenu
vasub = tk.Menu(Pmen, tearoff=0)
Surf,Scat,Cont,Imag = (tk.BooleanVar() for _ in xrange(4))
Surf.set(True)
vasub.add_checkbutton(label='Surface Plot', onvalue=1, offvalue=0,
variable=Surf)
vasub.add_checkbutton(label='Scatter Plot', onvalue=1, offvalue=0,
variable=Scat)
vasub.add_checkbutton(label='Contour Plot', onvalue=1, offvalue=0,
variable=Cont)
vasub.add_checkbutton(label='Image Plot', onvalue=1, offvalue=0,
variable=Imag)
plm.add_cascade(label='Verification', menu=vasub, underline=0)
# Validation plotting submenu
vesub = tk.Menu(Pmen, tearoff=0)
Obvc,Obwc,Obmc = (tk.BooleanVar() for _ in xrange(3))
Obvc.set(True)
vesub.add_checkbutton(label='Observed V.S. Calc Plot', onvalue=1, offvalue=0,
variable=Obvc)
vesub.add_checkbutton(label='Observed W/ Calc Plot', onvalue=1, offvalue=0,
variable=Obwc)
vesub.add_checkbutton(label='Observed - Calc Plot', onvalue=1, offvalue=0,
variable=Obmc)
plm.add_cascade(label='Validation', menu=vesub, underline=0)
specifar = tk.BooleanVar()
Tmen = tk.Menubutton(menubar, text='Tools', underline=0)
Tmen.pack(side=tk.LEFT)
tools = tk.Menu(Tmen,tearoff=0)
tools.add_checkbutton(command=copt, label='Cut Mesh', onvalue=1, offvalue=0,
variable=specifar)
Tmen.config(menu=tools)
Hmen = tk.Menubutton(menubar, text='Help', underline=0)
Hmen.pack(side=tk.LEFT)
hlp = tk.Menu(Hmen,tearoff=0)
hlp.add_command(command=doc, label='Documentation', underline=0)
hlp.add_command(command=bug, label='Contribute/Report Bug...', underline=0)
hlp.add_command(command=about, label='About VAVUQ', underline=0)
Hmen.config(menu=hlp)
root.sp = ttk.Separator(orient=tk.HORIZONTAL)
root.sp.grid(row=1,column=0,columnspan=3, sticky=tk.EW)
rw = 2
var1,var2,var3,var4,var3_1,var4_1,var5_1,var6_1 = (tk.StringVar()
for _ in xrange(8))
clr, fname = False, False
w1 = ttk.Label(root, text=" Run Option:")
w1.grid(row=rw,column=0,sticky=tk.E,padx=5)
def kpr(evt):
if cont: state()
opt1 = ttk.Combobox(root,textvariable=var1,state='readonly',foreground='blue',
width=34)
opt1['values'] = ('Validation',
'Code Verification (MMS, MES, or DM)',
'Solution Verification',
'Uncertainty Quantification')
opt1.bind('<<ComboboxSelected>>', kpr)
opt1.grid(row=rw,column=1,sticky=tk.W)
var1.set(55*' ')
rw += 1
butttons(root,rw,'nm')
root.withdraw()
root.update_idletasks()
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2 - 250
root.geometry("+%d+%d" % (x, y))
root.deiconify()
root.resizable(tk.FALSE, tk.FALSE)
root.bind("<Return>", kpr)
root.focus_set()
root.mainloop()
|
gpl-3.0
|
bsipocz/bokeh
|
sphinx/source/docs/tutorials/exercises/boxplot.py
|
22
|
2576
|
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show
# Generate some synthetic time series for six different categories
cats = list("abcdef")
score = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
score[g == l] += i // 2
df = pd.DataFrame(dict(score=score, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting: we need an x (categorical) and a y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
# EXERCISE: output static HTML file
# create a figure with the categories as the default x-range
p = figure(title="", tools="", background_fill="#EFE8E2", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# Draw the upper segment extending from the box plot using `p.segment` which
# takes x0, x1 and y0, y1 as data
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
# EXERCISE: use `p.segment` to draw the lower segment
# Draw the upper box of the box plot using `p.rect`
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `p.rect` to draw the bottom box with a different color
# OK here we use `p.rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# EXERCISE: use `p.circle` to draw the outliers
# EXERCISE: use `p.grid`, `p.axis`, etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
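# One possible completion of the EXERCISE steps above (added illustrative sketch,
# not part of the original tutorial file; colors, sizes, and the output filename
# are arbitrary choices):
output_file("boxplot.html")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
       fill_color="#3B8686", line_width=2, line_color="black")
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xaxis.major_label_text_font_size = "12pt"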
show(p)
|
bsd-3-clause
|
oshikiri/shirimas
|
src/ShiriMas.py
|
1
|
8070
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'oshikiri'
__email__ = '[email protected]'
__date__ = '2015-02-19'
import os
import sys
import re
import string
import MeCab
import pandas as pd
import sqlite3
from SlackBot.SlackBot import SlackBot
columns = ['type', 'subtype', 'purpose', 'channel',
'ts', 'user', 'username', 'text']
small_kana = 'ァィゥェォャュョッ'
large_kana = 'アイウエオヤユヨツ'
re_hiragana = re.compile('[ぁ-ゔ]', re.U)
def katakana(text):
    '''Convert hiragana to katakana.
    Based on the approach described at:
    http://d.hatena.ne.jp/mohayonao/20101213/1292237816
    '''
return re_hiragana.sub(lambda x: chr(ord(x.group(0)) + 0x60), text)
def get_yomi(text):
    '''Return the reading of text as parsed by MeCab.
    '''
tagger = MeCab.Tagger("-Ochasen")
result = tagger.parse(text)
yomi_list = []
for r in result.split('\n')[0:-2]:
r_list = r.split('\t')
yomi_list.append(r_list[1])
return ''.join(yomi_list)
def yomi_shiritori(text):
    '''Return the reading of text used for shiritori.
    The reading of text is returned after applying the following steps:
    1 normalize to katakana
    2 remove prolonged sound marks and other non-kana characters
    3 replace small kana with their large counterparts
    Args
    ===========
    text : string
        word whose reading is requested
    '''
# カッコとその中身と改行を無視
text = re.sub(r'(<[^>]+>|\([^)]+\)|\n)', '', text)
yomi0 = get_yomi(text)
yomi0 = katakana(yomi0)
yomi0 = re.sub(r'[^ァ-ヴ]*', '', yomi0)
yomi0 = yomi0.translate(str.maketrans(small_kana, large_kana))
return yomi0
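# Added illustrative sketch (requires a working MeCab installation; not part of
# the original bot): the normalization above maps e.g. 'パーティー' to 'パテイ',
# i.e. katakana only, prolonged sound marks dropped, small kana enlarged.
def _example_yomi_shiritori():
    return yomi_shiritori('パーティー')  # expected: 'パテイ'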
class ShiriMas(SlackBot):
'''shiritori bot for Slack
shiritori-master
'''
def __init__(self, botname, db_path, table_name, channel_name='shiritori'):
super().__init__(botname)
self.botname = botname
self.table_name = table_name
self.set_channel(channel_name)
        ## If the SQLite file does not exist yet,
        ## the DB is judged to need initialization
to_initialize = not os.path.exists(db_path)
self.connect = sqlite3.connect(db_path)
self.cursor = self.connect.cursor()
if to_initialize:
            print('Initializing the DB')
self.initialize_db()
def set_channel(self, channel_name):
        '''Set the channel that ShiriMas posts to.
        Posting to more than one channel is not expected,
        so a single channel is stored here.
        Args
        ===========
        channel_name: string
            Name (not id) of the channel to post to.
            Invalid values are not handled for now.
        '''
self.channel = super().get_channel_dict()[channel_name]
def get_messages(self, count=1):
        '''Fetch the latest (count) messages from Slack.
        Args
        ===========
        count : int, optional (default=1)
            number of messages to fetch
        '''
return super().get_messages(channel=self.channel, count=count)
def post_message(self, message):
        '''Post a message.
        Args
        ===========
        message: string
            message to post
        '''
super().post_message(message, self.channel)
def get_slack_newest_message(self):
        '''Fetch the newest message in the channel.
        '''
return self.get_messages(count=1)[0]
def get_db_newest_message(self):
        '''Fetch the newest message stored in the DB.
        '''
query = ('SELECT * FROM {0} '
'WHERE ts = (SELECT max(ts) FROM {0}) '
'LIMIT 1').format(self.table_name)
return pd.read_sql(query, self.connect).ix[0,:].to_dict()
def initialize_db(self):
        '''Initialize the DB.
        Fetch the latest 1000 messages and insert them into the DB.
        '''
messages = self.get_messages(count=1000)
df = pd.DataFrame(messages, columns=columns)
df['username'] = df.user.map(super().get_users_list())
df['yomi'] = df.text.map(yomi_shiritori)
df.to_sql(self.table_name, self.connect, index=False)
def append_messages(self, slack_newest_message, db_newest_message):
        '''Append messages not yet stored in the DB, if any.
        Args
        ===========
        slack_newest_message :
            dict with the data of the newest message on Slack
        db_newest_message :
            dict with the data of the newest message in the DB
        '''
slack_newest_ts = float(slack_newest_message['ts'])
db_newest_ts = float(db_newest_message['ts'])
if slack_newest_ts > db_newest_ts:
messages = self.get_messages(count=100)
df = pd.DataFrame(messages, columns=columns)
df['username'] = df.user.map(super().get_users_list())
df['yomi'] = df.text.map(yomi_shiritori)
additional_df = df.ix[df.ts.map(float) > db_newest_ts, :]
additional_df.to_sql(self.table_name, self.connect,
index=False, if_exists='append')
def get_candidate(self, prev_yomi):
        '''Return candidate words whose reading follows prev_yomi.
        Args
        ===========
        prev_yomi : string
            reading of the previous word
        '''
df = pd.DataFrame.from_csv('wikidata.csv', sep=' ',
header=None, index_col=None)
if prev_yomi:
cand_index = df.ix[:, 1] == prev_yomi[-1]
else:
            print('Cannot read the previous text')
sys.exit()
if not cand_index.any():
            print('No candidates available')
sys.exit()
return df.ix[cand_index, :]
def get_proper_candidate(self, candidates):
        '''Search the candidates for an appropriate word and return one.
        Args
        ===========
        candidates : pd.DataFrame
            candidate words
        '''
for i_cand in candidates.index:
cand_tmp = candidates.ix[i_cand, ]
cand_yomi = yomi_shiritori(str(cand_tmp[2]))
            ## check whether any past word already has cand_yomi as its reading
query = ('SELECT count(*) '
'FROM ' + self.table_name + ' '
'WHERE yomi = \'' + cand_yomi + '\';')
self.cursor.execute(query)
res_shape = self.cursor.fetchall()[0]
if res_shape == (0,):
                ## no past word has this reading
cand = cand_tmp
break
else:
            ## no usable candidate word was found
sys.exit()
return cand
def get_ans(self, prev_yomi):
        '''Return a word that follows prev_yomi and has not been used yet.
        Args
        ===========
        prev_yomi : string
            reading of the previous word
        '''
candidates = self.get_candidate(prev_yomi)
return self.get_proper_candidate(candidates)
def post_shiritori(self, cand, prev_user, prev_yomi):
        '''Post the shiritori answer to Slack.
        Args
        ===========
        cand : pd.Series
            candidate word entry to post
        prev_user : string
            name of the user who posted the previous message
        prev_yomi : string
            reading of the previous word
        '''
name = cand[0]
yomi = cand[2]
url = 'http://ja.wikipedia.org' + cand[3]
        ## avoid posting twice in a row
if not prev_user or prev_user != self.botname:
postmessage = name + '\n\n' + url
self.post_message(postmessage)
print(prev_yomi)
print(name, yomi)
|
mit
|
pydsigner/naev
|
utils/heatsim/heatsim.py
|
20
|
7285
|
#!/usr/bin/env python
#######################################################
#
# SIM CODE
#
#######################################################
# Imports
from frange import *
import math
import matplotlib.pyplot as plt
def clamp( a, b, x ):
return min( b, max( a, x ) )
class heatsim:
def __init__( self, shipname = "llama", weapname = "laser", simulation = [ 60., 120. ] ):
# Sim parameters
self.STEFAN_BOLZMANN = 5.67e-8
self.SPACE_TEMP = 250.
self.STEEL_COND = 54.
self.STEEL_CAP = 0.49
self.STEEL_DENS = 7.88e3
self.ACCURACY_LIMIT = 500
self.FIRERATE_LIMIT = 800
self.shipname = shipname
self.weapname = weapname
# Sim info
self.sim_dt = 1./50. # Delta tick
self.setSimulation( simulation )
# Load some data
self.ship_mass, self.ship_weaps = self.loadship( shipname )
self.weap_mass, self.weap_delay, self.weap_energy = self.loadweap( weapname )
def setSimulation( self, simulation ):
self.simulation = simulation
self.sim_total = simulation[-1]
def loadship( self, shipname ):
"Returns mass, number of weaps."
if shipname == "llama":
return 80., 2
elif shipname == "lancelot":
return 180., 4
elif shipname == "pacifier":
return 730., 5
elif shipname == "hawking":
return 3750., 7
elif shipname == "peacemaker":
return 6200., 8
else:
raise ValueError
def loadweap( self, weapname ):
"Returns mass, delay, energy."
if weapname == "laser":
return 2., 0.9, 4.25
elif weapname == "plasma":
return 4., 0.675, 3.75
elif weapname == "ion":
return 6., 1.440, 15.
elif weapname == "laser turret":
return 16., 0.540, 6.12
elif weapname == "ion turret":
return 42., 0.765, 25.
elif weapname == "railgun turret":
return 60., 1.102, 66.
else:
raise ValueError
def prepare( self ):
# Time stuff
self.time_data = []
# Calculate ship parameters
ship_kg = self.ship_mass * 1000.
self.ship_emis = 0.8
self.ship_cond = self.STEEL_COND
self.ship_C = self.STEEL_CAP * ship_kg
#self.ship_area = pow( ship_kg / self.STEEL_DENS, 2./3. )
self.ship_area = 4.*math.pi*pow( 3./4.*ship_kg/self.STEEL_DENS/math.pi, 2./3. )
self.ship_T = self.SPACE_TEMP
self.ship_data = []
# Calculate weapon parameters
weap_kg = self.weap_mass * 1000.
self.weap_C = self.STEEL_CAP * weap_kg
#self.weap_area = pow( weap_kg / self.STEEL_DENS, 2./3. )
self.weap_area = 2.*math.pi*pow( 3./4.*weap_kg/self.STEEL_DENS/math.pi, 2./3. )
self.weap_list = []
self.weap_T = []
self.weap_data = []
for i in range(self.ship_weaps):
self.weap_list.append( i*self.weap_delay / self.ship_weaps )
self.weap_T.append( self.SPACE_TEMP )
self.weap_data.append( [] )
def __accMod( self, T ):
return clamp( 0., 1., (T-500.)/600. )
def __frMod( self, T ):
return clamp( 0., 1., (1100.-T)/300. )
def simulate( self ):
"Begins the simulation."
# Prepare it
self.prepare()
# Run simulation
weap_on = True
sim_index = 0
dt = self.sim_dt
sim_elapsed = 0.
while sim_elapsed < self.sim_total:
Q_cond = 0.
# Check weapons
for i in range(len(self.weap_list)):
# Check if we should start/stop shooting
if self.simulation[ sim_index ] < sim_elapsed:
weap_on = not weap_on
sim_index += 1
# Check if shot
if weap_on:
self.weap_list[i] -= dt * self.__frMod( self.weap_T[i] )
if self.weap_list[i] < 0.:
self.weap_T[i] += 1e4 * self.weap_energy / self.weap_C
self.weap_list[i] += self.weap_delay
# Do heat movement (conduction)
Q = -self.ship_cond * (self.weap_T[i] - self.ship_T) * self.weap_area * dt
self.weap_T[i] += Q / self.weap_C
Q_cond += Q
self.weap_data[i].append( self.weap_T[i] )
# Do ship heat (radiation)
Q_rad = self.STEFAN_BOLZMANN * self.ship_area * self.ship_emis * (pow(self.SPACE_TEMP,4.) - pow(self.ship_T,4.)) * dt
Q = Q_rad - Q_cond
self.ship_T += Q / self.ship_C
self.time_data.append( sim_elapsed )
self.ship_data.append( self.ship_T )
# Elapsed time
sim_elapsed += dt;
    def save( self, filename ):
        "Saves the results to a file."
        f = open( filename, 'w' )
        for i in range(len(self.time_data)):
            f.write( str(self.time_data[i])+' '+str(self.ship_data[i]))
            for j in range(len(self.weap_data)):
                f.write( ' '+str(self.weap_data[j][i]) )
            f.write( '\n' )
        f.close()
def display( self ):
        print("Ship Temp: "+str(self.ship_T)+" K")
        for i in range(len(self.weap_list)):
            print("Outfit["+str(i)+"] Temp: "+str(self.weap_T[i])+" K")
def plot( self, filename=None ):
plt.hold(False)
plt.figure(1)
# Plot 1 Data
plt.subplot(211)
plt.plot( self.time_data, self.ship_data, '-' )
# Plot 1 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.title( 'NAEV Heat Simulation ('+self.shipname+' with '+self.weapname+')' )
        plt.legend( ('Ship',), loc='upper left')
plt.ylabel( 'Temperature [K]' )
plt.grid( True )
        # Plot 2 Data
plt.subplot(212)
plt.plot( self.time_data, self.weap_data[0], '-' )
plt.hold(True)
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.ACCURACY_LIMIT )
plt.plot( self.time_data, plt_data, '--' )
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.FIRERATE_LIMIT )
plt.plot( self.time_data, plt_data, '-.' )
plt.hold(False)
# Plot 2 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.legend( ('Weapon', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper right')
plt.ylabel( 'Temperature [K]' )
plt.xlabel( 'Time [s]' )
plt.grid( True )
        if filename is None:
plt.show()
else:
plt.savefig( filename )
if __name__ == "__main__":
print("NAEV HeatSim\n")
shp_lst = { 'llama' : 'laser',
'lancelot' : 'ion',
'pacifier' : 'laser turret',
'hawking' : 'ion turret',
'peacemaker' : 'railgun turret' }
for shp,wpn in shp_lst.items():
hs = heatsim( shp, wpn, (60., 120.) )
#hs = heatsim( shp, wpn, frange( 30., 600., 30. ) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_60_60.png' )
hs.setSimulation( (30., 90.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60.png' )
hs.setSimulation( (30., 90., 120., 180.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60_30_60.png' )
print( ' '+shp+' with '+wpn+' done!' )
|
gpl-3.0
|
nonsk131/USRP2016
|
generate_tests.py
|
1
|
3175
|
from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.utils import addmags
import numpy as np
import pandas as pd
file = open('true_params.txt','w')
for n in range(0,100,1):
if n < 10:
index = '0' + str(n)
else:
index = str(n)
file.write('test: ' + index + '\n')
dar = Dartmouth_Isochrone()
array = np.random.rand(2) + 0.5
if array[0] > array[1]:
M1 = array[0]
M2 = array[1]
else:
M1 = array[1]
M2 = array[0]
age1 = np.log10(5e8)
age2 = np.log10(1e9)
feh1 = 0.0
array = 900*np.random.rand(2) + 100
if array[0] > array[1]:
distance1 = array[0]
distance2 = array[1]
else:
distance1 = array[1]
distance2 = array[0]
AV1 = 0.1
feh2 = 0.0
AV2 = 0.1
params = (M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2)
params = str(params)
file.write('(M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2) = ' + params + '\n')
file.write('\n')
#Simulate true magnitudes
unresolved_bands = ['J','H','K']
resolved_bands = ['i','K']
args1 = (age1, feh1, distance1, AV1)
args2 = (age2, feh2, distance2, AV2)
unresolved = {b:addmags(dar.mag[b](M1, *args1), dar.mag[b](M2, *args2)) for b in unresolved_bands}
resolved_1 = {b:dar.mag[b](M1, *args1) for b in resolved_bands}
resolved_2 = {b:dar.mag[b](M2, *args2) for b in resolved_bands}
#print dar.mag['K'](M2, *args2)
#print unresolved, resolved_1, resolved_2
instruments = ['twomass','RAO']
bands = {'twomass':['J','H','K'],
'RAO':['i','K']}
mag_unc = {'twomass': 0.02, 'RAO':0.1}
resolution = {'twomass':4.0, 'RAO':0.1}
relative = {'twomass':False, 'RAO':True}
separation = 0.5
PA = 100.
columns = ['name', 'band', 'resolution', 'relative', 'separation', 'pa', 'mag', 'e_mag']
df = pd.DataFrame(columns=columns)
i=0
for inst in ['twomass']: #Unresolved observations
for b in bands[inst]:
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = 0.
row['pa'] = 0.
row['mag'] = unresolved[b]
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
for inst in ['RAO']: #Resolved observations
for b in bands[inst]:
mags = [resolved_1[b], resolved_2[b]]
pas = [0, PA]
seps = [0., separation]
for mag,sep,pa in zip(mags,seps,pas):
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = sep
row['pa'] = pa
row['mag'] = mag
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
#print df
df.to_csv(path_or_buf='df_binary_test{}.csv'.format(index))
file.close()
|
mit
|
amirkdv/biseqt
|
experiments/blot_sv.py
|
1
|
15152
|
#!/usr/bin/env python
import logging
from itertools import combinations, product
import sys
import numpy as np
import igraph
from time import time
from matplotlib import pyplot as plt
from biseqt.blot import WordBlotLocalRef
from biseqt.sequence import Alphabet
from biseqt.stochastics import rand_seq, rand_read, MutationProcess
from util import plot_with_sd, savefig, with_dumpfile, log
def gen_data_set(**kw):
alphabet, sv_type = kw['alphabet'], kw['sv_type']
ref_len, sv_len, sv_pos = kw['ref_len'], kw['sv_len'], kw['sv_pos']
gap, subst = kw['gap'], kw['subst']
coverage, margin = kw['coverage'], kw['margin']
sequencing_kw = {
'len_mean': sv_len * 2,
'len_sd': sv_len * .1,
'expected_coverage': coverage,
}
ref = rand_seq(alphabet, ref_len)
if sv_type == 'insertion':
indiv = ref[:sv_pos] + rand_seq(alphabet, sv_len) + ref[sv_pos:]
elif sv_type == 'deletion':
indiv = ref[:sv_pos] + ref[sv_pos + sv_len:]
elif sv_type == 'duplication':
sv_src = kw['sv_src'], kw['sv_src'] + sv_len
indiv = ref[:sv_pos] + ref[sv_src[0]:sv_src[1]] + ref[sv_pos:]
else:
raise ValueError('unknown SV type %s' % sv_type)
M = MutationProcess(alphabet, subst_probs=subst, ge_prob=gap, go_prob=gap)
reads = [(M.mutate(read)[0], pos)
for read, pos in rand_read(indiv, **sequencing_kw)]
labels = [False] * len(reads)
for idx, (read, start_pos) in enumerate(reads):
overlap_with_sv = min(start_pos + len(read), sv_pos + sv_len) - \
max(start_pos, sv_pos)
if overlap_with_sv < margin:
continue
start_overlap = start_pos < sv_pos
end_overlap = start_pos + len(read) > sv_pos + sv_len
if sv_type == 'insertion':
labels[idx] = start_overlap and end_overlap
elif sv_type == 'deletion':
labels[idx] = start_overlap
elif sv_type == 'duplication':
labels[idx] = start_overlap or end_overlap
else:
raise ValueError('unknown SV type %s' % sv_type)
reads = [read for read, pos in reads]
return {'ref': ref, 'indiv': indiv, 'reads': reads, 'labels': labels}
def chain_paths_on_read(read, sims, **kw):
margin = kw['margin']
seg_graph = igraph.Graph()
for sim in sims:
seg_graph.add_vertex(rec=sim)
seg_graph.add_vertex(name='start')
seg_graph.add_vertex(name='end')
for idx, sim in enumerate(sims):
if sim['read'][0] < margin:
seg_graph.add_edge('start', idx)
if sim['read'][1] > len(read) - margin:
seg_graph.add_edge(idx, 'end')
for (idx0, sim0), (idx1, sim1) in combinations(enumerate(sims), 2):
from0, to0 = sim0['read']
from1, to1 = sim1['read']
overlap_len = min(to0, to1) - max(from0, from1)
if overlap_len > -margin:
# HACK what to do with direction?
if from0 <= from1:
seg_graph.add_edge(idx0, idx1)
else:
seg_graph.add_edge(idx1, idx0)
return seg_graph.get_all_shortest_paths('start', to='end', mode='out')
def chain_paths_on_ref(read, sims, **kw):
margin = kw['margin']
seg_graph = igraph.Graph()
for sim in sims:
seg_graph.add_vertex(rec=sim)
seg_graph.add_vertex(name='start')
seg_graph.add_vertex(name='end')
min_start = min(sim['ref'][0] for sim in sims)
max_end = max(sim['ref'][1] for sim in sims)
for idx, sim in enumerate(sims):
if sim['ref'][0] == min_start:
seg_graph.add_edge('start', idx)
if sim['ref'][1] == max_end:
seg_graph.add_edge(idx, 'end')
for (idx0, sim0), (idx1, sim1) in combinations(enumerate(sims), 2):
from0, to0 = sim0['ref']
from1, to1 = sim1['ref']
overlap_len = min(to0, to1) - max(from0, from1)
if overlap_len > -margin:
# HACK what to do with direction?
if from0 <= from1:
seg_graph.add_edge(idx0, idx1)
else:
seg_graph.add_edge(idx1, idx0)
return seg_graph.get_all_shortest_paths('start', to='end', mode='out')
# mode tells us whether the path is on read or on ref
def sv_coords(sims, path, mode='read'):
assert len(path) == 4
segs_in_order = sorted(path[1:-1], key=lambda i: sims[i]['ref'][0])
coord0 = sims[segs_in_order[0]]['ref']
coord1 = sims[segs_in_order[1]]['ref']
if mode == 'read':
return coord0[1], coord1[0]
else:
return (coord0[1] + coord1[0]) / 2
def call_svs(ref, reads, margin=50, K_min=200, p_min=.8, **WB_kw):
"""Calls structural variants based on single read mappings. For each read,
the algorithm proceeds as follows:
.. figure::
https://www.dropbox.com/s/rjqobtkp60snte7/SV-schematic.png?raw=1
:target:
https://www.dropbox.com/s/rjqobtkp60snte7/SV-schematic.png?raw=1
:alt: lightbox
Schematic representation of the chain graphs used to call structural
variants. Each read (vertical axis) is compared against the reference
sequence (horizontal axis) and in each case two chain graphs are created
on each of the read and reference axes where two similar segments are
connected with an edge if their projections on the corresponding axis
can be consistently chained.
* all local similarities are found via Word-Blot
(:func:`biseqt.blot.WordBlotLocalRef.similar_segments` with given
:math:`K_{\min}, p_{\min}`).
* Two chain graphs are built based on the projections of similarities on
the read and on the reference genome, henceforth the *read graph* and the
*reference graph*.
* Reads containing SVs are identified using the following rules:
* Normal reads are characterized by a shortest path in both the read
and reference graphs with exactly two edges between the start and the
end of projections.
* deletions and duplications produce shortest paths with four edges in
the read graph.
* insertions produce shortest paths with four edges in the reference
graph.
"""
WB = WordBlotLocalRef(ref, **WB_kw)
label_hats = [{'I': [False, []], # insertion
'D': [False, []]} # deletion / duplication
for _ in reads]
for read_idx, read in enumerate(reads):
sims = list(
WB.similar_segments(read, K_min, p_min)
)
for sim_idx, sim in enumerate(sims):
ref_pos, read_pos = WB.to_ij_coordinates_seg(sim['segment'])
sims[sim_idx]['read'] = read_pos
sims[sim_idx]['ref'] = ref_pos
# for duplication or deletion
d_paths = chain_paths_on_read(read, sims, margin=margin)
if d_paths:
d_path = d_paths[0]
label_hats[read_idx]['D'][0] = len(d_path) > 3
if len(d_path) == 4:
label_hats[read_idx]['D'][1] = sv_coords(sims, d_path,
mode='read')
i_paths = chain_paths_on_ref(read, sims, margin=margin)
if i_paths:
i_path = i_paths[0]
label_hats[read_idx]['I'][0] = len(i_path) > 3
if len(i_path) == 4:
label_hats[read_idx]['I'][1] = sv_coords(sims, i_path,
mode='ref')
sys.stderr.write('.')
return label_hats
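# --- Illustrative sketch (editor's addition, not part of the original module;
# vertex names are made up): the chain-graph rule used in call_svs in a
# nutshell.  Two similar segments that chain across a read give a
# start -> seg -> seg -> end shortest path with four vertices, which is
# exactly what flags a candidate SV read (len(path) > 3).
def _demo_chain_path_rule():
    g = igraph.Graph()
    g.add_vertex(name='seg0')    # similarity covering the left part of the read
    g.add_vertex(name='seg1')    # similarity covering the right part of the read
    g.add_vertex(name='start')
    g.add_vertex(name='end')
    g.add_edge('start', 'seg0')  # seg0 starts within `margin` of the read start
    g.add_edge('seg0', 'seg1')   # the two projections overlap, so they chain
    g.add_edge('seg1', 'end')    # seg1 reaches the end of the read
    path = g.get_all_shortest_paths('start', to='end', mode='out')[0]
    return len(path)             # 4 -> the read would be flagged as SV evidence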
@with_dumpfile
def sim_structural_variants(**kw):
sv_len, ps, wordlen = kw['sv_len'], kw['ps'], kw['wordlen']
n_samples, coverage, margin = kw['n_samples'], kw['coverage'], kw['margin']
sv_types = ['insertion', 'deletion', 'duplication']
# maps sv types to sv call types
sv_type_dict = {'insertion': 'I', 'duplication': 'D', 'deletion': 'D'}
A = Alphabet('ACGT')
WB_kw = {'g_max': .2, 'sensitivity': .9, 'alphabet': A, 'wordlen': wordlen,
'log_level': logging.WARN}
def _zero(): return np.zeros((len(ps), n_samples))
sim_data = {
'coverage': coverage,
'n_samples': n_samples,
'sv_len': sv_len,
'ps': ps,
'WB_kw': WB_kw,
'coords': {sv_type: {p_idx: {idx: [] for idx in range(n_samples)}
for p_idx in range(len(ps))}
for sv_type in sv_types},
'stats': {stat: {sv_type: _zero() for sv_type in sv_types}
for stat in ['tpr', 'fpr']},
'times': _zero(),
}
for sv_type, (p_idx, p_match) in product(sv_types, enumerate(ps)):
ref_len = sv_len * 5
sv_src = sv_len
sv_pos = sv_len * 3
# distribute p_match evenly over gap and subst
subst = gap = 1 - np.sqrt(p_match)
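# (with this choice (1 - gap) * (1 - subst) = sqrt(p_match)**2 = p_match,
#  i.e. the mismatch budget is split evenly between gaps and substitutions)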
log('SV: %s, p = %.2f (n=%d rounds)' %
(sv_type, p_match, n_samples))
for sample_idx in range(n_samples):
dataset = gen_data_set(
sv_type=sv_type,
alphabet=A,
ref_len=ref_len,
sv_len=sv_len,
sv_src=sv_src,
sv_pos=sv_pos,
gap=gap,
subst=subst,
coverage=coverage,
margin=margin,
)
labels = dataset['labels']
K_min = 200
p_min = .6
t_start = time()
label_hats = call_svs(dataset['ref'], dataset['reads'],
K_min=K_min, p_min=p_min, **WB_kw)
sim_data['times'][p_idx, sample_idx] = time() - t_start
tp, fp, tn, fn = 0, 0, 0, 0
for label, label_hat in zip(labels, label_hats):
key = sv_type_dict[sv_type]
if label and label_hat[key][0]:
tp += 1
elif label and not label_hat[key][0]:
fn += 1
elif not label and label_hat[key][0]:
fp += 1
elif not label and not label_hat[key][0]:
tn += 1
if label_hat[key][0]:
sim_data['coords'][sv_type][p_idx][sample_idx].append(
label_hat[key][1])
# almost impossible that tn + fp = 0, but tp + fn can be zero:
if tp + fn == 0:
tpr = 1.
else:
tpr = 1. * tp / (tp + fn)
fpr = 1. * fp / (tn + fp)
sim_data['stats']['tpr'][sv_type][p_idx, sample_idx] = tpr
sim_data['stats']['fpr'][sv_type][p_idx, sample_idx] = fpr
sys.stderr.write(' tpr = %.2f, fpr = %.2f' % (tpr, fpr))
sys.stderr.write('\n')
return sim_data
def plot_structural_variants(sim_data, suffix=''):
ps = sim_data['ps']
sv_types = ['duplication', 'deletion', 'insertion']
fig = plt.figure(figsize=(11, 4))
ax_stats = {}
# ax_coord = {}
for idx, sv_type in enumerate(sv_types):
ax_stats[sv_type] = fig.add_subplot(1, 3, idx + 1)
kw = {'markersize': 5, 'marker': 'o', 'alpha': .7}
ls = {'tpr': '-', 'fpr': '--'}
label = {'tpr': 'TPR (sensitivity)', 'fpr': 'FPR (1 - specificity)'}
for sv_type in sv_types:
for stat, values in sim_data['stats'].items():
plot_with_sd(ax_stats[sv_type],
ps, sim_data['stats'][stat][sv_type][:, :], axis=1,
ls=ls[stat], label=label[stat],
**kw)
ax_stats[sv_type].set_ylim(-.1, 1.3)
ax_stats[sv_type].set_title(sv_type)
ax_stats[sv_type].legend(loc='upper left')
ax_stats[sv_type].set_xlabel('match probability')
ax_stats[sv_type].set_xticks(ps)
ax_stats[sv_type].set_xticklabels(ps)
times = sim_data['times'].flatten()
print 'time: %.2f s (%.2f)' % (times.mean(), np.sqrt(times.var()))
savefig(fig, 'structural_variants%s.png' % suffix)
def exp_structural_variants():
"""Performance of Word-Blot in detecting structural variations (SV) in
*simulations*:
* Given a single read containing part of a SV and a reference sequence, one
can deduce the presence of a SV from the topological arrangement of local
similarities. The simple algorithm used here, :func:`call_svs`, simply
pays attention to shortest paths in either of two directed graphs
obtained by chaining projected similarities on either the reference or
the read sequences.
* For each of three copy number variation SVs, *insertion, deletion,
duplication*, similar segments detected by Word-Blot are used to detect
whether each read contains an SV. In each case the true positive rate and
false positive rate are plotted as a function of similarity between
individual and reference sequences.
**Supported Claims**
* The simple topological algorithm in :func:`call_svs` can accurately
distinguish normal reads from those containing a SV.
* The crux of our simple, single-read SV calling algorithm is that,
roughly, SVs can be detected in a single read whenever there are more
than one read/reference local similarities that, when chained, span the
entire read (for duplication and deletion) or a whole region on reference
(for insertion). This shows that Word-Blot accurately recovers local
similarities at their maximal length without producing unnecessary
fragmentation.
* Since Word-Blot is a general purpose local similarity search and thus
makes no assumption of collinearity, it can accurately identify simple SVs
(copy number variations) by simply looking at the mapping of individual reads
to a reference sequence.
.. figure::
https://www.dropbox.com/s/ase6z7wzlbc6dy0/
structural_variants%5Bw%3D8%5D.png?raw=1
:target:
https://www.dropbox.com/s/ase6z7wzlbc6dy0/
structural_variants%5Bw%3D8%5D.png?raw=1
:alt: lightbox
True positive rate (solid) and false positive rate (dashed) for
discriminating normal reads from those containing evidence for SV,
duplications (*left*), deletion (*middle*), insertion (*right*) using
the simple algorithm based on Word-Blot local similarities. SV length is
500nt, reference sequence length is 2500nt, sequencing coverage is 10,
word length is 8, and each condition (match probability and SV type) is
repeated n=5 times, average computation time for each read is 0.8
seconds.
"""
wordlen = 8 # kept in memory; don't go too high up
sv_len = 500
margin = 50
ps = [round(.98 - .03 * i, 2) for i in range(6)]
coverage = 10
n_samples = 5
suffix = '[w=%d]' % wordlen
dumpfile = 'sv_calling%s.txt' % suffix
sim_data = sim_structural_variants(
sv_len=sv_len, ps=ps, wordlen=wordlen, n_samples=n_samples,
margin=margin, coverage=coverage,
dumpfile=dumpfile, ignore_existing=False,
)
plot_structural_variants(sim_data, suffix=suffix)
if __name__ == '__main__':
exp_structural_variants()
|
bsd-3-clause
|
harisbal/pandas
|
pandas/tests/generic/test_frame.py
|
8
|
9766
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from operator import methodcaller
from copy import deepcopy
from distutils.version import LooseVersion
import pytest
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, date_range, MultiIndex
from pandas.compat import range
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestDataFrame(Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_set_axis_name(self):
df = pd.DataFrame([[1, 2], [3, 4]])
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, 'foo')(df)
assert df.index.name is None
assert result.index.name == 'foo'
result = methodcaller(func, 'cols', axis=1)(df)
assert df.columns.name is None
assert result.columns.name == 'cols'
def test_set_axis_name_mi(self):
df = DataFrame(
np.empty((3, 3)),
index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]),
columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')])
)
level_names = ['L1', 'L2']
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
assert result.columns.names == ["L1", "L2"]
assert result.index.names == [None, None]
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
assert df.bool()
df = DataFrame([[False]])
assert not df.bool()
df = DataFrame([[False, False]])
pytest.raises(ValueError, lambda: df.bool())
pytest.raises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
assert result.filename == 'fname1.csv|fname2.csv'
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
assert result.filename == 'foo+foo'
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert df.y == 5
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
LooseVersion(xarray.__version__) <
LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index", ['FloatIndex', 'IntIndex',
'StringIndex', 'UnicodeIndex',
'DateIndex', 'PeriodIndex',
'CategoricalIndex', 'TimedeltaIndex'])
def test_to_xarray_index_types(self, index):
from xarray import Dataset
index = getattr(tm, 'make{}'.format(index))
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101',
periods=3,
tz='US/Eastern')}
)
df.index = index(3)
df.index.name = 'foo'
df.columns.name = 'bar'
result = df.to_xarray()
assert result.dims['foo'] == 3
assert len(result.coords) == 1
assert len(result.data_vars) == 8
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, Dataset)
# idempotency
# categoricals are not preserved
# datetimes w/tz are not preserved
# column names are lost
expected = df.copy()
expected['f'] = expected['f'].astype(object)
expected['h'] = expected['h'].astype('datetime64[ns]')
expected.columns.name = None
assert_frame_equal(result.to_dataframe(), expected,
check_index_type=False, check_categorical=False)
@td.skip_if_no('xarray', min_version='0.7.0')
def test_to_xarray(self):
from xarray import Dataset
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101',
periods=3,
tz='US/Eastern')}
)
df.index.name = 'foo'
result = df[0:0].to_xarray()
assert result.dims['foo'] == 0
assert isinstance(result, Dataset)
# available in 0.7.1
# MultiIndex
df.index = pd.MultiIndex.from_product([['a'], range(3)],
names=['one', 'two'])
result = df.to_xarray()
assert result.dims['one'] == 1
assert result.dims['two'] == 3
assert len(result.coords) == 2
assert len(result.data_vars) == 8
assert_almost_equal(list(result.coords.keys()), ['one', 'two'])
assert isinstance(result, Dataset)
result = result.to_dataframe()
expected = df.copy()
expected['f'] = expected['f'].astype(object)
expected['h'] = expected['h'].astype('datetime64[ns]')
expected.columns.name = None
assert_frame_equal(result,
expected,
check_index_type=False)
def test_deepcopy_empty(self):
# This test covers empty frame copying with non-empty column sets
# as reported in issue GH15370
empty_frame = DataFrame(data=[], index=[], columns=['A'])
empty_frame_copy = deepcopy(empty_frame)
self._compare(empty_frame_copy, empty_frame)
|
bsd-3-clause
|
wronk/mne-python
|
examples/inverse/plot_lcmv_beamformer.py
|
3
|
3197
|
"""
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on an evoked dataset for three different
choices of source orientation and store the solutions in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True, proj=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
# Plot last stc in the brain in 3D with PySurfer if available
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir)
brain.set_data_time_index(180)
brain.show_view('lateral')
|
bsd-3-clause
|
jchodera/LiquidBenchmark
|
src/munge_output_amber.py
|
2
|
2349
|
import chemistry
from openmoltools import cirpy
import mdtraj as md
import pymbar
import os
import pandas as pd
import glob
import dipole_errorbars
from density_simulation_parameters import DATA_PATH
num_bootstrap = 100
fixed_block_length = 20 # 200 ps blocks for dielectric error bar block averaging.
prmtop_filenames = glob.glob(DATA_PATH + "/tleap/*.prmtop")
filename_munger = lambda filename: os.path.splitext(os.path.split(filename)[1])[0].split("_")
data = []
for prmtop_filename in prmtop_filenames:
cas, n_molecules, temperature = filename_munger(prmtop_filename)
print(cas, temperature)
dcd_filename = DATA_PATH + "/production/%s_%s_%s_production.dcd" % (cas, n_molecules, temperature)
csv_filename = DATA_PATH + "/production/%s_%s_%s_production.csv" % (cas, n_molecules, temperature)
try:
traj = md.load(dcd_filename, top=prmtop_filename)
except IOError:
continue
if traj.unitcell_lengths is None: continue
rho = pd.read_csv(csv_filename)["Density (g/mL)"].values * 1000. # g / mL -> kg /m3
initial_traj_length = len(traj)
initial_density_length = len(rho)
[t0, g, Neff] = pymbar.timeseries.detectEquilibration(rho)
mu = rho[t0:].mean()
sigma = rho[t0:].std() * Neff ** -0.5
prmtop = chemistry.load_file(prmtop_filename)
charges = prmtop.to_dataframe().charge.values
temperature = float(temperature)
traj = traj[t0 * len(traj) / len(rho):]
dielectric = md.geometry.static_dielectric(traj, charges, temperature)
dielectric_sigma_fixedblock = dipole_errorbars.bootstrap_old(traj, charges, temperature, fixed_block_length)[1]
block_length = dipole_errorbars.find_block_size(traj, charges, temperature)
dielectric_sigma = dipole_errorbars.bootstrap(traj, charges, temperature, block_length, num_bootstrap)
formula = cirpy.resolve(cas, "formula")
data.append(dict(cas=cas, temperature=temperature, n_trimmed=t0, inefficiency=g, initial_traj_length=initial_traj_length, initial_density_length=initial_density_length, density=mu, density_sigma=sigma, Neff=Neff, n_frames=traj.n_frames, dielectric=dielectric, dielectric_sigma=dielectric_sigma, dielectric_sigma_fixedblock=dielectric_sigma_fixedblock, block_length=block_length, formula=formula))
print(data[-1])
data = pd.DataFrame(data)
data.to_csv("./tables/predictions.csv")
|
gpl-2.0
|
spectre007/CCParser
|
ParserData.py
|
1
|
11735
|
from . import constants as c
import numpy as np
import json
class Struct(object):
""" Struct-like container object """
def __init__(self, **kwds): # keyword args define attribute names and values
self.__dict__.update(**kwds)
class ParseContainer(object):
""" Generic container object which keeps track of the parsed quantities.
It allows to parse the same quantity several times.
There should be one instance/parsed quantity. """
def __init__(self):
self.nversion = 0
self.data = []
self.lines = []
self.serializable = False
@classmethod
def from_obj(cls, line, parsed_obj):
"""Alternative constructor. Initialize directly with line and parsed
object.
Parameters
----------
line : int
line number
parsed_obj : any
parsed object
"""
pc = cls()
pc.add(line, parsed_obj)
return pc
def add(self, hook_line, new_obj):
#self.data[hook_line] = new_pvalue
self.data.append(new_obj)
self.lines.append(hook_line)
self.nversion += 1
def get_first(self):
idx = self.lines.index(min(self.lines))#not needed if assuming ordered parsing (line by line)
#return self.data[0]
return self.data[idx]
def get_last(self):
idx = self.lines.index(max(self.lines))#not needed if assuming ordered parsing (line by line)
# return self.data[-1]
return self.data[idx]
def get_data(self):
return self.data
def get_lines(self):
return self.lines
def make_serializable(self):
"""Turn fancy data types into sth that json.dump can recognize. """
try:
dt = type(self.data[0])
except IndexError:
raise ParserDataError(("ParseContainer not serializable (data list"
" empty)."))
# take care of numpy data types
if dt.__module__ == "numpy" or "numpy." in dt.__module__:
encoder = NumpyEncoder()
self.data = [encoder.default(obj) for obj in self.data]
# CCParser data types
elif dt == MolecularOrbitals or dt == Amplitudes:
self.data = [obj.encode() for obj in self.data]
# assume other datatypes are serializable
self.serializable = True
def to_tuple(self):
if self.serializable:
return tuple(zip(self.data, self.lines))
else:
self.make_serializable()
return tuple(zip(self.data, self.lines))
def to_list(self):
if self.serializable:
return list(zip(self.data, self.lines))
else:
self.make_serializable()
return list(zip(self.data, self.lines))
def __len__(self):
assert len(self.data) == len(self.lines)
return len(self.data)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.data[idx.start : idx.stop : idx.step]
else:
if idx >= len(self.data) or abs(idx) > len(self.data):
raise IndexError("ParseContainer: Index out of range")
return self.data[idx]
def __setitem__(self, idx, value):
""" Setter method which expects value tuple (line, parsed_obj) """
self.lines[idx] = value[0]
self.data[idx] = value[1]
def __delitem__(self, idx):
del self.data[idx]
del self.lines[idx]
def __iter__(self):
return iter(self.data)
# def __next__(self):
# if self.n <= self.nversion:
# return self.data[self.n]
# else:
# raise StopIteration
def __contains__(self, line):
if type(line) == str:
line = int(line)
return True if line in self.lines else False
def __str__(self):
s = "\n"
s+= "Line" + 3*" " + "Parsed Value\n"
for i in range(self.nversion):
s+= str(self.lines[i]) + 3*" " + str(self.data[i]) + "\n"
return s
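# --- Usage sketch (editor's addition; the parsed values below are made up):
# one ParseContainer accumulates every occurrence of a single parsed quantity
# together with the line number it was found on.
def _parse_container_demo():
    pc = ParseContainer()
    pc.add(12, -76.4021)    # quantity parsed on line 12
    pc.add(587, -76.4103)   # same quantity parsed again on line 587
    assert pc.get_first() == -76.4021
    assert pc.get_last() == -76.4103
    return pc.to_list()     # [(-76.4021, 12), (-76.4103, 587)]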
class ParserDataError(Exception):
"""Raise for ParserData related errors. """
class StructEncoder(json.JSONEncoder):
def default(self, struct):
if isinstance(struct, Struct):
results = {}
for label, pc in struct.__dict__.items():
results[label] = pc.to_list()
return results
else:
return super().default(struct)
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): #### This is the fix
return obj.tolist()
else:
return super().default(obj)
class MolecularOrbitals(object):
# TODO: change name? "OrbitalEnergies"
# TODO: add symmetries
""" General molecular orbital class, which has more functionality than
simple arrays.
"""
N_ORB_PER_LINE = 10
def __init__(self, o, v):
self.occ = list(map(float, o))
self.virt = list(map(float, v))
self.n_occ = len(o)
self.n_virt = len(v)
self.n_mo = self.n_occ + self.n_virt
self.homo = max(self.occ ) if self.n_occ > 0 else 0
self.lumo = min(self.virt) if self.n_virt > 0 else 0
@classmethod
def from_dict(cls, d):
try:
return cls(d["occ"], d["virt"])
except (KeyError, TypeError) as e:
raise ParserDataError(("Dictionary not suitable to create "
"MolecularOrbitals object."))
#TODO: from_string method as implicit list conversion is not great
@classmethod
def from_tuples(cls, t):
# find first occurrence of virt
idx = next(t.index(i) for i in t if i[1] == "virt" or i[1] == "v")
# create lists using the index
o, dummy = zip(*t[:idx])
v, dummy = zip(*t[idx:])
return cls(o, v)
def __str__(self):
n1 = [i for i in range(1, self.n_occ+1)]
n2 = [i for i in range(self.n_occ +1, self.n_mo+1)]
s = "\n"
for i in range(0, len(self.occ), self.N_ORB_PER_LINE):
s += 4*" " + " ".join("{:>8}".format(j) for j in n1[i:i+self.N_ORB_PER_LINE])+"\n"
if i == 0:
s += " occ: " +' '.join("{:8.3f}".format(j) for j in self.occ[i:i+self.N_ORB_PER_LINE])+"\n"
else:
s += 6*" "+' '.join("{:8.3f}".format(j) for j in self.occ[i:i+self.N_ORB_PER_LINE])+"\n"
s += 7*" "+88*"-"+"\n"
for i in range(0, len(self.virt), self.N_ORB_PER_LINE):
s += 4*" " + " ".join("{:>8}".format(j) for j in n2[i:i+self.N_ORB_PER_LINE])+"\n"
if i == 0:
s += " virt:" +' '.join("{:8.3f}".format(j) for j in self.virt[i:i+self.N_ORB_PER_LINE])+"\n"
else:
s += 6*" "+' '.join("{:8.3f}".format(j) for j in self.virt[i:i+self.N_ORB_PER_LINE])+"\n"
return s
def RVS(self, gap):
""" Determine amount of virtual orbitals to freeze based on energy gap (in eV) """
if gap <= 0:
raise ValueError("Negative or Zero energy gap not allowed for restriction of virtual space.")
else:
thr = gap/c.Hartree2eV + self.homo
# print("THR: ",thr)
# print("N_VIRT: ", self.n_virt)
idx = min(range(len(self.virt)), key=lambda i: abs(self.virt[i]-thr))
freeze = self.n_virt - (idx +1)
part_of_v = float(freeze)/float(self.n_virt)
s = "Index: {0:3d}, Number of frozen virtuals: {1:3d}, ({2:.1%})".format(idx, freeze, part_of_v)
print(s)
def to_dict(self):
return {"occ" : self.occ, "virt" : self.virt}
def to_tuples(self):
return list(zip(self.occ, ["occ" for i in range(self.n_occ )])) \
+ list(zip(self.virt, ["virt" for i in range(self.n_virt)]))
def encode(self, fmt=tuple):
if fmt == tuple:
return self.to_tuples()
elif fmt == dict:
return self.to_dict()
else:
raise ValueError("Export format not recognized.")
class Amplitudes(object):
""" General container for amplitudes of one state for easier access to and export of amplitude data """
def __init__(self, occ, virt, v, factor=1.0):
self.occ = occ # list of lists, even if only single int in sublist
self.virt= virt
self.v = v
self.factor = factor
self.weights = list(map(lambda x: self.factor * x**2, self.v))
self.print_thr = 0.05
def __str__(self):
s = "Amplitudes: Weights > {0:.0%}\n".format(self.print_thr)
for i in range(len(self.occ)):
if self.weights[i] > self.print_thr:
if len(self.occ[i]) == 1:
s += "{0:>4} -> {1:>4} : {2:.1f}\n".format(self.occ[i][0],
self.virt[i][0], 100*self.weights[i])
elif len(self.occ[i]) == 2:
s += "{0:>4}, {1:>4} -> {2:>4}, {3:>4} : {4:.1f}\n".format(
self.occ[i][0], self.occ[i][1], self.virt[i][0],
self.virt[i][1], 100*self.weights[i])
return s
@classmethod
def from_list(cls, allinone, factor=1.0):
""" Alternative constructor which expects single list.
Format: [[occ_i, occ_j,..., virt_a, virt_b,..., ampl], ...] """
occ, virt, v = [], [], []
for transition in allinone:
assert(len(transition) % 2 != 0)
n_mo = int((len(transition)-1)/2)
occ.append(transition[0:n_mo])# slices yield list, even if only one element
virt.append(transition[n_mo:-1])
v.append(transition[-1])# index yields float
return cls(occ, virt, v, factor)
def to_dataframe(self, thresh=0.05):
""" Converts the amplitude data to handy pandas.DataFrame object """
try:
import pandas as pd
except ImportError:
raise ImportError("Module 'pandas' needed for 'Amplitudes.to_dataframe()' ")
# TODO: improve this clunky part
max_exc = max(list(map(len,self.occ)))
occ, virt = [], []
for i in range(len(self.occ)):
occ.append(self.occ[i] + [0]*(max_exc - len(self.occ[i])))
virt.append(self.virt[i] + [0]*(max_exc - len(self.virt[i])))
idx_o = list(map(lambda x: "occ_"+str(x), [n for n in range(1,max_exc+1)]))
idx_v = list(map(lambda x: "virt_"+str(x), [n for n in range(1,max_exc+1)]))
df = pd.concat([pd.DataFrame(occ, columns=idx_o),
pd.DataFrame(virt, columns=idx_v),
pd.Series(self.weights, name="weight")], axis=1)
return df[(df["weight"] > thresh)] # trim DataFrame based on awesome function
def to_list(self):
""" Return single list of amplitude data in the format:
[[occ_i, occ_j,..., virt_a, virt_b,..., ampl], ...]
"""
ampl = []
for i, v in enumerate(self.v):
ampl.append(self.occ[i] + self.virt[i] + [v])
return ampl
def encode(self, fmt=list):
if fmt == list:
return self.to_list()
# elif fmt == pd.DataFrame:
# return self.to_dataframe()
else:
raise ValueError("Export format not recognized.")
|
mit
|
Ldpe2G/mxnet
|
example/kaggle-ndsb1/submission_dsb.py
|
15
|
4287
|
from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array of probabilities with one column per class and one row per image in the test set (as listed in test.lst), formats it in the Kaggle submission format, then saves and gzip-compresses it at submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
cols_to_use = columns[len(columns)-1] # keep only the last column index
cols_to_use= map(int, str(cols_to_use)) ## convert scalar to list
img_lst= pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
|
apache-2.0
|
ephes/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
jampp/airflow
|
airflow/hooks/dbapi_hook.py
|
17
|
5081
|
from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
from airflow.hooks.base_hook import BaseHook
from airflow.utils import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_pandas_df(self, sql, parameters=None):
'''
Executes the sql and returns a pandas dataframe
'''
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn)
conn.close()
return df
def get_records(self, sql, parameters=None):
'''
Executes the sql and returns a set of records.
'''
conn = self.get_conn()
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_first(self, sql, parameters=None):
'''
Executes the sql and returns the first resulting row.
'''
conn = self.get_conn()
cur = conn.cursor()
cur.execute(sql)
rows = cur.fetchone()
cur.close()
conn.close()
return rows
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
conn = self.get_conn()
if isinstance(sql, basestring):
sql = [sql]
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
cur = conn.cursor()
for s in sql:
cur.execute(s)
conn.commit()
cur.close()
conn.close()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
if isinstance(cell, basestring):
l.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
l.append('NULL')
elif isinstance(cell, numpy.datetime64):
l.append("'" + str(cell) + "'")
elif isinstance(cell, datetime):
l.append("'" + cell.isoformat() + "'")
else:
l.append(str(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
if i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
def get_conn(self):
"""
Returns a sql connection that can be used to retrieve a cursor.
"""
raise NotImplementedError()
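# --- Illustrative sketch (editor's addition; hypothetical subclass, not part
# of Airflow): a concrete hook only needs to name its connection attribute and
# return a DB-API 2.0 connection; run(), get_records(), insert_rows() etc.
# defined above are then inherited unchanged.
class _SqliteDemoHook(DbApiHook):
    conn_name_attr = 'sqlite_demo_conn_id'
    default_conn_name = 'sqlite_demo_default'
    supports_autocommit = False

    def get_conn(self):
        # a real hook would resolve host/schema/login via self.get_connection()
        import sqlite3
        return sqlite3.connect(':memory:')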
|
apache-2.0
|
anntzer/scikit-learn
|
examples/svm/plot_oneclass.py
|
95
|
2419
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s, edgecolors='k')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s,
edgecolors='k')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s,
edgecolors='k')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
|
bsd-3-clause
|
samuel1208/scikit-learn
|
sklearn/utils/multiclass.py
|
92
|
13986
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
    Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
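# ---------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): one way an estimator's
# ``partial_fit`` could use ``_check_partial_fit_first_call``. The class name
# and the per-class count attribute below are hypothetical.
class _ToyPartialFitEstimator(object):
    def partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # First call: ``self.classes_`` was just set from ``classes``,
            # so per-class state can be allocated here.
            self.class_count_ = np.zeros(len(self.classes_), dtype=np.int64)
        # Update the per-class counts on every call.
        y_idx = np.searchsorted(self.classes_, y)
        self.class_count_ += np.bincount(y_idx, minlength=len(self.classes_))
        return self
# ---------------------------------------------------------------------------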
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
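# Illustrative usage sketch (not part of scikit-learn): for
#     y = np.array([[1, 0], [2, 0], [2, 1]])
# class_distribution(y) returns classes [array([1, 2]), array([0, 1])],
# n_classes [2, 2], and class priors of roughly [1/3, 2/3] for the first
# output and [2/3, 1/3] for the second.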
|
bsd-3-clause
|
mattgiguere/scikit-learn
|
sklearn/metrics/regression.py
|
27
|
9558
|
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array-like of shape = [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples, n_outputs]
Estimated target values.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
y_type = 'continuous' if y_true.shape[1] == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred
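# Illustrative sketch (not part of scikit-learn): _check_reg_targets reshapes
# 1d inputs into column vectors and reports the task type, e.g.
# _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2]) returns 'continuous' plus
# two arrays of shape (3, 1).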
def _average_and_variance(values, sample_weight=None):
"""
Compute the (weighted) average and variance.
Parameters
----------
values : array-like of shape = [n_samples] or [n_samples, n_outputs]
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average : float
The weighted average
variance : float
The weighted variance
"""
values = np.asarray(values)
if values.ndim == 1:
values = values.reshape((-1, 1))
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 1:
sample_weight = sample_weight.reshape((-1, 1))
average = np.average(values, weights=sample_weight)
variance = np.average((values - average)**2, weights=sample_weight)
return average, variance
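# Illustrative sketch (not part of scikit-learn): with values [1.0, 3.0] and
# sample_weight [3, 1], _average_and_variance returns an average of 1.5 and a
# variance of 0.75 (the weighted mean of squared deviations).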
def mean_absolute_error(y_true, y_pred, sample_weight=None):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(np.abs(y_pred - y_true).mean(axis=1),
weights=sample_weight)
def mean_squared_error(y_true, y_pred, sample_weight=None):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(((y_pred - y_true) ** 2).mean(axis=1),
weights=sample_weight)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred, sample_weight=None):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
Ground truth (correct) target values.
y_pred : array-like
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
The explained variance.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type != "continuous":
raise ValueError("{0} is not supported".format(y_type))
_, numerator = _average_and_variance(y_true - y_pred, sample_weight)
_, denominator = _average_and_variance(y_true, sample_weight)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred, sample_weight=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
The R^2 score.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(dtype=np.float64)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
|
bsd-3-clause
|
allisony/triangle.py
|
tests.py
|
3
|
3274
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["test_hist2d", "test_corner"]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import triangle
FIGURE_PATH = "test_figures"
def _run_hist2d(nm, N=50000, seed=1234, **kwargs):
print(" .. {0}".format(nm))
if not os.path.exists(FIGURE_PATH):
os.makedirs(FIGURE_PATH)
# Generate some fake data.
np.random.seed(seed)
x = np.random.randn(N)
y = np.random.randn(N)
fig, ax = pl.subplots(1, 1, figsize=(8, 8))
triangle.hist2d(x, y, ax=ax, **kwargs)
fig.savefig(os.path.join(FIGURE_PATH, "hist2d_{0}.png".format(nm)))
pl.close(fig)
def test_hist2d():
_run_hist2d("cutoff", range=[(0, 4), (0, 2.5)])
_run_hist2d("cutoff2", range=[(-4, 4), (-0.1, 0.1)], N=100000,
fill_contours=True, smooth=1)
_run_hist2d("basic")
_run_hist2d("color", color="g")
_run_hist2d("levels1", levels=[0.68, 0.95])
_run_hist2d("levels2", levels=[0.5, 0.75])
_run_hist2d("filled", fill_contours=True)
_run_hist2d("smooth1", bins=50)
_run_hist2d("smooth2", bins=50, smooth=(1.0, 1.5))
_run_hist2d("philsplot", plot_datapoints=False, fill_contours=True,
levels=[0.68, 0.95], color="g", bins=50, smooth=1.)
def _run_corner(nm, pandas=False, N=10000, seed=1234, ndim=3, ret=False,
factor=None, **kwargs):
print(" .. {0}".format(nm))
if not os.path.exists(FIGURE_PATH):
os.makedirs(FIGURE_PATH)
np.random.seed(seed)
    data1 = np.random.randn(ndim * 4 * N // 5).reshape([4 * N // 5, ndim])
    data2 = (5 * np.random.rand(ndim)[None, :]
             + np.random.randn(ndim * N // 5).reshape([N // 5, ndim]))
data = np.vstack([data1, data2])
if factor is not None:
data[:, 0] *= factor
data[:, 1] /= factor
if pandas:
data = pd.DataFrame.from_items(zip(map("d{0}".format, range(ndim)),
data.T))
fig = triangle.corner(data, **kwargs)
fig.savefig(os.path.join(FIGURE_PATH, "triangle_{0}.png".format(nm)))
if ret:
return fig
else:
pl.close(fig)
def test_corner():
_run_corner("basic")
_run_corner("labels", labels=["a", "b", "c"])
_run_corner("quantiles", quantiles=[0.16, 0.5, 0.84])
_run_corner("color", color="g")
fig = _run_corner("color-filled", color="g", fill_contours=True,
ret=True)
_run_corner("overplot", seed=15, color="b", fig=fig, fill_contours=True)
_run_corner("smooth1", bins=50)
_run_corner("smooth2", bins=50, smooth=1.0)
_run_corner("smooth1d", bins=50, smooth=1.0, smooth1d=1.0)
_run_corner("titles1", show_titles=True)
_run_corner("top-ticks", top_ticks=True)
_run_corner("pandas", pandas=True)
_run_corner("truths", truths=[0.0, None, 0.15])
_run_corner("no-fill-contours", no_fill_contours=True)
# _run_corner("mathtext", factor=1e8, use_math_text=True)
fig = _run_corner("tight", ret=True)
pl.tight_layout()
fig.savefig(os.path.join(FIGURE_PATH, "triangle_tight.png"))
pl.close(fig)
if __name__ == "__main__":
print("Testing 'hist2d'")
test_hist2d()
print("Testing 'corner'")
test_corner()
|
bsd-2-clause
|
tlee753/stock-market-analysis
|
python/pandasTest.py
|
1
|
1046
|
from pandas_datareader import data
import pandas as pd
# Define the instruments to download. We would like to see Apple, Microsoft and the S&P500 index.
tickers = ['AAPL', 'MSFT', 'SPY']
# Define which online source one should use
data_source = 'google'
# We would like all available data from 01/01/2010 until 12/31/2016.
start_date = '2010-01-01'
end_date = '2016-12-31'
# Use pandas_datareader.data.DataReader to load the desired data. As simple as that.
panel_data = data.DataReader(tickers, data_source, start_date, end_date)
# Getting just the closing prices. This will return a Pandas DataFrame
# The index in this DataFrame is the major index of the panel_data.
close = panel_data.ix['Close']
# Getting all weekdays between 01/01/2010 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
close = close.reindex(all_weekdays)
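# (Illustrative note, not in the original script: reindexing to all weekdays
# leaves NaNs on market holidays; a common follow-up would be a forward fill,
# e.g. close = close.fillna(method='ffill').)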
close.head(10)
|
mit
|
timothydmorton/qa_explorer
|
explorer/dataset.py
|
1
|
30548
|
import numpy as np
import pandas as pd
import holoviews as hv
from functools import partial
import pickle
import tempfile
import os, shutil
import logging
import fastparquet
import dask.dataframe as dd
from holoviews.operation.datashader import dynspread, datashade
from .functors import Functor, CompositeFunctor, Column, RAColumn, DecColumn, Mag
from .functors import StarGalaxyLabeller
from .catalog import MatchedCatalog, MultiMatchedCatalog, IDMatchedCatalog, MultiBandCatalog
from .plots import filter_dset
class QADataset(object):
"""Container to coordinate and visualize function calculations on catalogs
The fundamental thing that a `QADataset` does is compute a `pandas.DataFrame`
containing the results of evaluating the desired functors on the desired
`Catalog`: the `.df` attribute.
In addition to containing a column for the result of each
`Functor` calculation, `.df` also always contains columns for coordinates
(`ra`, `dec`), object type label (`label`), x-coordinate of scatter plots
(`x`, by default psfMag), an "id" column that will be either
`ccdId` or `patchId`, depending on which is relevant,
and columns for any desired boolean flags.
In addition to the `.df` attribute, a `QADataset` also puts together a
`holoviews.Dataset` object that wraps this data, in the `.ds` attribute.
This is the object that can get passed to various plotting functions
from `explorer.plots`.
A `QADataset` can take different kinds of `Catalog` objects,
and the the computed
`.df` and `.ds` attributes will look slightly different in each case:
* With a normal single
`Catalog`, the dataframe will contain the functor computations in columns
keyed by the keys of the functor dictionary, and `.ds` will just
be a simple wrapper of this.
* With a `MatchedCatalog`,
the columns will have the same names but will contain the *difference*
of the computations between the two catalogs (unless a
functor has `allow_difference=False`).
* With a `MultiMatchedCatalog`,
the columns of `.df` will be multi-level indexed, containing the full
information of each quantity calculated on each catalog (this is
    where the memory footprint can begin to climb). In this case, `.ds`
contains the standard deviation of each measurement among all the catalogs;
also, a `_ds_dict` attribute is created, where a `holoviews.Dataset`
object can be accessed for each individual catalog (keyed by visit name or
'coadd').
* Using a `MultiBandCatalog`, the `.df` attribute contains functor computations
for each band, in a multi-level column index, and in addition contains
color columns for all magnitude functors provided. In this case, a special
    `.color_ds` dataset is created for each magnitude (see the `color_ds`
    method and `get_color_ds`).
This object is pickleable, and can be dumped to file using the `.save()`
method. Especially when using `MultiMatchedCatalog` catalogs, the
dataframe computations can take a long time, so this can be desirable.
Note that the `client` object cannot get saved though, so client must be
re-initialized on load, which you can do with the `.load()` classmethod.
Parameters
----------
catalog : explorer.Catalog
Catalog on which to perform calculations. May be any type of catalog.
funcs : dict or list
Dictionary or list of functors. If list, then the names of the `Functor`
columns will be `y0, y1, y2, ...`.
flags : list
List of flags to load from catalog
xFunc : explorer.Functor
Functor to treat as the abscissa for the scatter plots.
Default is `Mag('base_PsfFlux')`.
labeller : explorer.functors.Labeller
Functor to assign labels to sources. Default is
`explorer.functors.StarGalaxyLabeller()`.
query : str
[Not implemented fully or tested. Do not use.]
client : distributed.Client
Dask cluster client to be passed to evaluation of functors.
cachedir : str
Directory to which to write dataframe if `self.oom` is True.
oom : bool
Whether to store computed dataframe out of memory. Future
to-be-implemented feature; not really used yet.
"""
def __init__(self, catalog, funcs, flags=None,
xFunc=Mag('base_PsfFlux', allow_difference=False),
labeller=StarGalaxyLabeller(),
query=None, client=None,
cachedir=None, oom=False):
self._set_catalog(catalog)
self._set_funcs(funcs, xFunc, labeller)
self._set_flags(flags)
self.client = client
self._query = query
if cachedir is None:
cachedir = tempfile.gettempdir()
self._cachedir = cachedir
self._df_file = None
self.oom = oom
def save(self, filename, protocol=4):
"""Write to file
Parameters
----------
filename : str
Filename to write pickle file to. By convention,
should end with ".pkl"
"""
pickle.dump(self, open(filename, 'wb'), protocol=protocol)
@classmethod
def load(cls, filename, client=None):
"""Restore from a saved file
Parameters
----------
filename : str
Filename to load previously saved `QADataset` from.
client : distributed.Client
Client object, if desired. Note that any client previously
set was not saved, so this must be re-initialized if desired.
"""
new = pickle.load(open(filename, 'rb'))
new.client = client
return new
def __getstate__(self):
        # Copy the dict so dropping the client for pickling does not
        # clobber the live object's client.
        odict = self.__dict__.copy()
        odict['client'] = None
return odict
def __setstate__(self, d):
self.__dict__ = d
def __del__(self):
if self._df_computed and self.oom:
os.remove(self.df_file)
def _set_catalog(self, catalog):
"""Change catalog
Sets catalog to be a new `Catalog`, and resets data structures.
Parameters
----------
catalog : explorer.Catalog
New catalog.
"""
self.catalog = catalog
self._reset()
def _set_funcs(self, funcs, xFunc, labeller):
"""Initialize functions
Parameters
----------
funcs : dict or list
Dictionary or list of functors. If list, then will be
converted into dictionary keyed by `y0, y1, y2, ...`.
xFunc : explorer.functors.Functor
`Functor` to function as the x-coordinate of the scatter
plots.
labeller : explorer.functors.Labeller
`Functor` to label points.
"""
if isinstance(funcs, list) or isinstance(funcs, tuple):
self.funcs = {'y{}'.format(i):f for i,f in enumerate(funcs)}
elif isinstance(funcs, Functor):
self.funcs = {'y0':funcs}
else:
self.funcs = funcs
self.xFunc = xFunc
self.labeller = labeller
self._reset()
def _set_flags(self, flags):
if flags is None:
self.flags = []
else:
self.flags = flags # TODO: check to make sure flags are valid
self._reset()
def _reset(self):
"""Sets state such that data structurs need to be recomputed
Necessary after changing catalog, or query, for example.
"""
self._df_computed = False
self._ds = None
@property
def query(self):
return self._query
@query.setter
def query(self, new):
self._query = new
self._reset()
@property
def allfuncs(self):
"""Dictionary of all functors to be computed from catalog
In addition to the ones provided upon initialization of the
`QADataset`, this also contains `ra`, `dec`, `x`, `label`,
`ccdId`/`patchId`, and all flags.
"""
allfuncs = self.funcs.copy()
# Set coordinates and x value
allfuncs.update({'ra':RAColumn(), 'dec': DecColumn(),
'x':self.xFunc})
if self.id_name is not None:
allfuncs.update({self.id_name : Column(self.id_name)})
# Include flags
allfuncs.update({f:Column(f) for f in self.flags})
if self.labeller is not None:
allfuncs.update({'label':self.labeller})
return allfuncs
@property
def df(self):
"""Dataframe containing results of computation
"""
if not self._df_computed:
self._make_df()
return self._df
# Save below for when trying to do more out-of-memory
# df = pd.read_hdf(self.df_file, 'df')
        # df = pd.read_parquet(self.df_file)  # wait for pandas 0.22
        # df = dd.read_parquet(self.df_file)
        # return df
@property
def is_matched(self):
return isinstance(self.catalog, MatchedCatalog)
@property
def is_multi_matched(self):
return isinstance(self.catalog, MultiMatchedCatalog)
@property
def is_idmatched(self):
return isinstance(self.catalog, IDMatchedCatalog)
@property
def is_multiband(self):
return isinstance(self.catalog, MultiBandCatalog)
@property
def id_name(self):
"""patchId or ccdId, as appropriate
Necessary in order to know which image to load to inspect
object.
"""
if self.is_idmatched:
name = 'patchId'
elif self.is_multi_matched:
name = 'ccdId'
elif self.is_matched and not self.is_multi_matched:
if 'ccdId' in self.catalog.cat1.columns:
name = 'ccdId'
elif 'patchId' in self.catalog.cat1.columns:
name = 'patchId'
else:
logging.warning('No id name available (looked for ccdId, patchId)?')
name = None
elif 'ccdId' in self.catalog.columns:
name = 'ccdId'
elif 'patchId' in self.catalog.columns:
name = 'patchId'
else:
logging.warning('No id name available (looked for ccdId, patchId)?')
name = None
return name
@property
def mag_names(self):
"""Names of magnitude functors.
Used in order to calculate color information if catalog is a `MultiBandCatalog`.
"""
return [name for name, fn in self.funcs.items() if isinstance(fn, Mag)]
@property
def df_file(self):
"""File to store out-of-memory df in
[Not really used yet, but placeholder]
"""
if self._df_file is None:
self._df_file = os.path.join(self._cachedir, next(tempfile._get_candidate_names()))
return self._df_file
def _make_df(self, **kwargs):
"""Compute dataframe.
This is called if the `.df` attribute is accessed
but `._df_computed` is False.
"""
f = CompositeFunctor(self.allfuncs)
if self.is_multi_matched:
kwargs.update(how='all')
df = f(self.catalog, query=self.query, client=self.client, dropna=False, **kwargs)
if self.is_matched and not self.is_idmatched:
df = pd.concat([df, self.catalog.match_distance.dropna(how='all')], axis=1)
if not self.is_matched:
df = df.dropna(how='any')
df = df.replace([-np.inf, np.inf], np.nan)
# Add color columns if catalog is a MultiBandCatalog
if self.is_multiband:
cat = self.catalog
color_dfs = []
filters = cat.filters
n_filts = len(filters)
cols_to_difference = cat.color_groups
for mag in self.mag_names:
col_names = [('{}_color'.format(mag), color) for color in cat.colors]
mags = df[mag]
color_df = pd.DataFrame({c : mags[c1] - mags[c2] for c, (c1, c2) in zip(col_names, cols_to_difference)})
color_df.dropna(how='any', inplace=True)
df = pd.concat([df, color_df], axis=1)
if self.oom:
# df.to_hdf(self.df_file, 'df') #must be format='table' if categoricals included
df.to_parquet(self.df_file) # wait for pandas 0.22
# fastparquet.write(self.df_file, df) # Doesn't work with multiindexing
self._df_computed = True
self._df = df
@property
def ds(self):
"""Holoviews Dataset wrapper of the underlying dataframe
"""
if self._ds is None:
self._make_ds()
return self._ds
def get_ds(self, key):
"""Get holoviews dataset corresponding to specific catalog
This is relevant for `MultiMatchedCatalogs`, where multiple
`holoviews.Dataset` objects are created and saved in the `_ds_dict`
attribute.
"""
if self._ds is None:
self._make_ds()
return self._ds_dict[key]
def get_color_ds(self, key):
"""Get holoviews "color" dataset corresponding to particular magnitude
This is relevant for `MultiBandCatalog`, where multiple
`holoviews.Dataset` objects are created and saved in the `_color_ds_dict`
attribute, keyed by magnitude name.
"""
if self._ds is None:
self._make_ds()
return self._color_ds_dict[key]
def _get_kdims(self):
"""Get key dimensions, for generating holoviews Datasets
Key dimensions are ra, dec, x, label, ccdId/patchId, and all flags
"""
kdims = ['ra', 'dec', hv.Dimension('x', label=self.xFunc.name), 'label']
if self.id_name is not None:
kdims.append(self.id_name)
kdims += self.flags
return kdims
def _make_ds(self, **kwargs):
"""Create holoviews.Dataset objects needed to generate plots.
"""
kdims = self._get_kdims()
vdims = []
for k,v in self.allfuncs.items():
if k in ('ra', 'dec', 'x', 'label', self.id_name) or k in self.flags:
continue
label = v.name
if v.allow_difference and not self.is_multiband:
if self.is_multi_matched:
label = 'std({})'.format(label)
elif self.is_matched:
label = 'diff({})'.format(label)
vdims.append(hv.Dimension(k, label=label))
if self.is_matched and not self.is_idmatched:
vdims += [hv.Dimension('match_distance', label='Match Distance [arcsec]')]
if self.is_multiband:
self._color_ds_dict = {}
for mag in self.mag_names:
self._color_ds_dict[mag] = self.color_ds(mag)
df = self.df.dropna(how='any')
elif self.is_multi_matched:
# reduce df appropriately here
coadd_cols = ['ra', 'dec', 'x', 'label'] + self.flags
visit_cols = list(set(self.df.columns.levels[0]) - set(coadd_cols))
df_swap = self.df.swaplevel(axis=1)
coadd_df = df_swap.loc[:, 'coadd'][coadd_cols]
visit_df = self.df[visit_cols].drop('coadd', axis=1, level=1)
dfs = [coadd_df, visit_df.std(axis=1, level=0).dropna(how='any')]
# This dropna thing is a problem when there are NaNs in flags.
# Solution: use subset=[...] to define the subset of columns to look for NaNs
subset_to_check = [c for c in df_swap['coadd'].columns if c not in [self.id_name] + self.flags]
df_dict = {k:df_swap[k].dropna(how='any', subset=subset_to_check).reset_index()
for k in ['coadd'] + self.catalog.visit_names}
self._ds_dict = {k:hv.Dataset(df_dict[k], kdims=kdims, vdims=vdims) for k in df_dict}
# Keep only rows that aren't nan in visit values
df = pd.concat(dfs, axis=1, join='inner')
else:
df = self.df.dropna(how='any')
ds = hv.Dataset(df.reset_index(), kdims=kdims, vdims=vdims)
self._ds = ds
def color_ds(self, mag):
"""Calculate holoviews.Dataset object containing colors for a given magnitude type
* Functor values and 'x' values come from catalog's reference filter.
* Flags are computed as the "or" of all bands.
* Labels are re-determined as follows:
* If object is a star in all bands, it is called a "star"
* If it is a star in zero bands, it is called a "noStar"
* If it is called a star in some bands but not all, it is called a "maybe"
"""
if not self.is_multiband:
return NotImplementedError('Can only get color_ds if catalog is a MultiBandCatalog')
if not isinstance(self.allfuncs[mag], Mag):
raise ValueError('Requested column must be a magnitude: {} requested'.format(mag))
color_df = self.df[['ra', 'dec']]
color_df.columns = color_df.columns.get_level_values(0)
filt = self.catalog.reference_filt
swap_df = self.df.swaplevel(axis=1)
# Get values for functors and 'x' from reference filter
func_keys = list(self.funcs.keys()) + ['x'] + [self.id_name]
color_df = pd.concat([color_df, swap_df[filt][func_keys]], axis=1)
# Compute flags as the "or" of all
flag_cols = [pd.Series(self.df[flag].max(axis=1).astype(bool), name=flag) for flag in self.flags]
color_df = pd.concat([color_df] + flag_cols, axis=1)
# Calculate group label
n = self.catalog.n_filters
n_star = (self.df['label']=='star').sum(axis=1)
label = pd.Series(pd.cut(n_star, [-1, 0, n-1 , n], labels=['noStar', 'maybe', 'star']),
index=n_star.index, name='label')
color_df['label'] = label
color_df = pd.concat([color_df, self.df['{}_color'.format(mag)]], axis=1)
# color_df = pd.concat([self.df[['ra', 'dec']],
# swap_df[filt],
# self.df['{}_color'.format(mag)]], axis=1)
# color_df = color_df.rename(columns={('ra', 'ra'):'ra', ('dec', 'dec'): 'dec'})
return hv.Dataset(color_df, kdims=self._get_kdims())
def visit_points(self, vdim, visit, x_max, label,
filter_range=None, flags=None, bad_flags=None):
"""Companion function to visit_explore that returns Points object
Parameters
----------
vdim : str
Name of dimension whose value gets colormapped.
visit, x_max, label : int, float, str
Parameters that become kdims in the holoviews.DynamicMap of
`visit_explore`.
filter_range, flags, bad_flags : dict, list, list
Parameters controlled by the `filter_stream` parameter
of `visit_explore`.
"""
if self.is_multi_matched:
try:
dset = self.get_ds(visit)
except KeyError:
dset = self.get_ds(str(visit))
else:
if visit != 'coadd':
raise ValueError('visit name must be "coadd"!')
dset = self.ds
dset = dset.select(x=(None, x_max), label=label)
# filter_range = {} if filter_range is None else filter_range
# flags = [] if flags is None else flags
# bad_flags = [] if bad_flags is None else bad_flags
dset = filter_dset(dset, filter_range=filter_range, flags=flags, bad_flags=bad_flags)
# dset = dset.redim(**{vdim:'y'})
vdims = [vdim, 'id', 'x']
if self.id_name is not None:
vdims.append(self.id_name)
pts = hv.Points(dset, kdims=['ra', 'dec'], vdims=vdims)
return pts.opts(plot={'color_index':vdim})
def visit_explore(self, vdim, x_range=np.arange(15,24.1,0.5), filter_stream=None,
range_override=None):
"""Dynamic map of values of a particular dimension, scrollable through visits
Parameters
----------
vdim : str
Name of dimension to explore.
x_range : array
Values of faint magnitude limit. Only points up to this limit will be plotted.
Beware of scrolling to too faint a limit; it might give you too many points!
filter_stream : explorer.plots.FilterStream, optional
Stream of constraints that controls what data to display. Useful to link
multiple plots together
range_override : (min, max), optional
            By default the colormap will be scaled between the 0.005 and 0.995 quantiles
of the *entire* set of visits. Sometimes this is not a useful range to view,
so this parameter allows a custom colormap range to be set.
"""
if filter_stream is not None:
streams = [filter_stream]
else:
streams = []
fn = partial(QADataset.visit_points, self=self, vdim=vdim)
dmap = hv.DynamicMap(fn, kdims=['visit', 'x_max', 'label'],
streams=streams)
y_min = self.df[vdim].drop('coadd', axis=1).quantile(0.005).min()
y_max = self.df[vdim].drop('coadd', axis=1).quantile(0.995).max()
ra_min, ra_max = self.catalog.coadd_cat.ra.quantile([0, 1])
dec_min, dec_max = self.catalog.coadd_cat.dec.quantile([0, 1])
ranges = {vdim : (y_min, y_max),
'ra' : (ra_min, ra_max),
'dec' : (dec_min, dec_max)}
if range_override is not None:
ranges.update(range_override)
# Force visit names to be integers, if possible
try:
visit_names = [int(v) for v in self.catalog.visit_names]
visit_names.sort()
        except (ValueError, TypeError):
visit_names = self.catalog.visit_names
dmap = dmap.redim.values(visit=visit_names,
# vdim=list(self.funcs.keys()) + ['match_distance'],
# vdim=[vdim],
label=['galaxy', 'star'],
x_max=x_range).redim.range(**ranges)
return dmap
def coadd_points(self, vdim, x_max, label, **kwargs):
"""Same as visit_points, but for coadd image.
"""
return self.visit_points(vdim, 'coadd', x_max, label, **kwargs)
def coadd_explore(self, vdim, x_range=np.arange(15,24.1,0.5), filter_stream=None,
range_override=None):
"""Dynamic map of coadd values
Parameters
----------
vdim : str
Name of dimension to explore.
x_range : array
Values of faint magnitude limit. Only points up to this limit will be plotted.
Beware of scrolling to too faint a limit; it might give you too many points!
filter_stream : explorer.plots.FilterStream, optional
Stream of constraints that controls what data to display. Useful to link
multiple plots together
range_override : (min, max), optional
            By default the colormap will be scaled between the 0.005 and 0.995 quantiles
of the *entire* set of visits. Sometimes this is not a useful range to view,
so this parameter allows a custom colormap range to be set.
"""
if filter_stream is not None:
streams = [filter_stream]
else:
streams = []
fn = partial(QADataset.coadd_points, self=self, vdim=vdim)
dmap = hv.DynamicMap(fn, kdims=['x_max', 'label'],
streams=streams)
if self.is_multi_matched:
y_min = self.df[(vdim, 'coadd')].quantile(0.005)
y_max = self.df[(vdim, 'coadd')].quantile(0.995)
ra_min, ra_max = self.catalog.coadd_cat.ra.quantile([0, 1])
dec_min, dec_max = self.catalog.coadd_cat.dec.quantile([0, 1])
else:
y_min = self.df[vdim].quantile(0.005)
y_max = self.df[vdim].quantile(0.995)
ra_min, ra_max = self.catalog.ra.quantile([0, 1])
dec_min, dec_max = self.catalog.dec.quantile([0, 1])
ranges = {vdim : (y_min, y_max),
'ra' : (ra_min, ra_max),
'dec' : (dec_min, dec_max)}
if range_override is not None:
ranges.update(range_override)
# Force visit names to be integers, if possible
dmap = dmap.redim.values(label=['galaxy', 'star'],
x_max=x_range).redim.range(**ranges)
return dmap
def color_points(self, mag=None, xmax=21, label='star',
filter_range=None, flags=None, bad_flags=None,
x_range=None, y_range=None):
"""Datashaded layout of color-color plots
Parameters
----------
mag : str
Name of magnitude to get color info from (e.g., name of mag functor).
xmax : float
faint magnitude limit.
label : str
Desired label of points
filter_range, flags, bad_flags : dict, list, list
Parameters controlled by the `filter_stream` parameter
of `color_explore`.
x_range, y_range :
Arguments required for datashaded map to be dynamic when passed to a DynamicMap
"""
if mag is None:
mag = self.mag_names[0]
colors = self.catalog.colors
pts_list = []
for c1,c2 in zip(colors[:-1], colors[1:]):
dset = self.get_color_ds(mag).select(x=(0,xmax), label=label)
if filter_range is not None:
dset = filter_dset(dset, filter_range=filter_range, flags=flags, bad_flags=bad_flags)
pts_list.append(dset.to(hv.Points, kdims=[c1, c2], groupby=[]).redim.range(**{c1:(-0.2,1.5),
c2:(-0.2,1.5)}))
return hv.Layout([dynspread(datashade(pts, dynamic=False, x_range=x_range, y_range=y_range)) for pts in pts_list]).cols(2)
def color_explore(self, xmax_range=np.arange(18,26.1,0.5), filter_stream=None):
"""Dynamic map of color-color plots
Parameters
----------
xmax_range : array
Array of max magnitude values for slider widget
filter_stream : explorer.plots.FilterStream
Stream of constraints that controls what data to display. Useful to link
multiple plots together
"""
streams = [hv.streams.RangeXY()]
if filter_stream is not None:
streams += [filter_stream]
dmap = hv.DynamicMap(partial(QADataset.color_points, self=self), kdims=['mag', 'xmax', 'label'],
streams=streams)
dmap = dmap.redim.values(mag=self.mag_names, xmax=xmax_range, label=['star', 'maybe', 'noStar'])
return dmap
def color_points_fit(self, mag=None, colors='GRI', xmax=21, label='star',
filter_range=None, flags=None, bad_flags=None,
x_range=None, y_range=None, bounds=None, order=3):
"""Simple points + polynomial fit of selected range
This is more a simple proof-of-concept than anything particularly
useful at this point.
Parameters
----------
mag : str
Name of magnitude from which colors are desired.
colors : str
Color-color group desired (e.g., 'GRI', 'RIZ', 'IZY').
xmax : float
Maximum magnitude to allow
label : str
Desired label of points.
filter_range, flags, bad_flags : dict, list, list
Parameters controlled by the `filter_stream` parameter
of `color_fit_explore`.
x_range, y_range :
Arguments required for datashaded map to be dynamic when passed to a DynamicMap
(though right now this particular element does not get datashaded)
bounds : (l,b,r,t)
Bounds of selection box.
order : int
Order of polynomial fit
"""
if mag is None:
mag = self.mag_names[0]
c1 = '{}_{}'.format(*colors[0:2])
c2 = '{}_{}'.format(*colors[1:3])
dset = self.get_color_ds(mag).select(x=(0,xmax), label=label)
if filter_range is not None:
dset = filter_dset(dset, filter_range=filter_range, flags=flags, bad_flags=bad_flags)
pts = dset.to(hv.Points, kdims=[c1, c2], groupby=[]).redim.range(**{c1:(-0.2,1.5),
c2:(-0.2,1.5)})
# Fit selected region to polynomial and plot
if bounds is None:
fit = hv.Curve([])
else:
l,b,r,t = bounds
subdf = pts.data.query('({0} < {4} < {2}) and ({1} < {5} < {3})'.format(l,b,r,t,c1,c2))
coeffs = np.polyfit(subdf[c1], subdf[c2], order)
x_grid = np.linspace(subdf[c1].min(), subdf[c1].max(), 100)
y_grid = np.polyval(coeffs, x_grid)
# print(x_grid, y_grid)
fit = hv.Curve(np.array([x_grid, y_grid]).T).opts(style={'color':'black', 'width':3})
print(fit)
return pts * fit
# return dynspread(datashade(pts, dynamic=False, x_range=x_range, y_range=y_range)) * fit
def color_fit_explore(self, xmax_range=np.arange(18,26.1,0.5), filter_stream=None):
"""Dynamic map exploring polynomial fit to selected range of color-color plot
Parameters
----------
xmax_range : array
Array of max magnitude values for slider widget
filter_stream : explorer.plots.FilterStream
Stream of constraints that controls what data to display. Useful to link
multiple plots together
"""
streams = [hv.streams.RangeXY(), hv.streams.BoundsXY()]
if filter_stream is not None:
streams += [filter_stream]
dmap = hv.DynamicMap(partial(QADataset.color_points_fit, self=self), kdims=['colors','mag', 'xmax', 'label'],
streams=streams)
dmap = dmap.redim.values(mag=self.mag_names, xmax=xmax_range,
label=['star', 'maybe', 'noStar'],
colors=self.catalog.color_colors)
return dmap
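# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module); the catalog object and
# the 'modelfit_CModel' flux name below are hypothetical, while
# Mag('base_PsfFlux') mirrors the default xFunc above:
#
#     funcs = {'psfMag': Mag('base_PsfFlux'),
#              'cmodelMag': Mag('modelfit_CModel')}
#     qa = QADataset(catalog, funcs)
#     qa.df.head()   # dataframe of functor results, computed on first access
#     qa.ds          # holoviews.Dataset wrapping qa.df
# ---------------------------------------------------------------------------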
|
mit
|
clingsz/GAE
|
main.py
|
1
|
1502
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 12:38:40 2017
@author: cling
"""
def summarizing_cross_validation():
import misc.cv.collect_ND5_3 as cv
cv.fig_boxplot_cverr()
def test_trainer():
import misc.data_gen as dg
import gae.model.trainer as tr
data = dg.get_training_data()
model = tr.build_gae()
model.train(data['X'],data['Y'])
def plot_immune_age():
import test.visualizations as vis
vis.plot_immune_age()
def plot_MIGS():
import test.visualizations as vis
vis.plot_cyto_age(cyto_name = 'IL6')
# vis.plot_cyto_age(cyto_name = 'IL1B')
# vis.Jacobian_analysis()
def distribution_test():
import immuAnalysis.distribution_test as dt
dt.show_hist(range(3,53),'dist_cyto.pdf')
dt.show_hist(range(53,78),'dist_cell.pdf')
def cell_cluster():
import immuAnalysis.cytokine_clustering as cc
## cc.main()
## cc.pvclust_main()
cc.agclust_main()
# B = [10,20,50,100,1000]
## cc.gap_stats(B)
# import gfmatplotlib.pyplot as plt
# plt.figure(figsize=[5*4,5])
# for i in range(5):
# plt.subplot(1,5,i+1)
# cc.show_gapstats(B[i])
# plt.tight_layout()
# plt.show()
# cc.generate_data_for_pvclust()
# cc.choose_cluster()
#import immuAnalysis.module_genes as mg
if __name__ == '__main__':
test_trainer()
# summarizing_cross_validation()
# import immuAnalysis.clustering as c
##
# c.test()
# import immuAnalysis.gene_analysis_ann as g
# g.summarize()
|
gpl-3.0
|
bennlich/scikit-image
|
skimage/io/manage_plugins.py
|
7
|
10329
|
"""Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
try:
from configparser import ConfigParser # Python 3
except ImportError:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
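# Illustrative sketch (not part of scikit-image): a plugin ``.ini`` file of the
# kind parsed above could look like the following (the ``description`` field is
# hypothetical; ``provides`` is the key used by ``_scan_plugins`` below):
#
#     [pil]
#     description = Image I/O via the Python Imaging Library
#     provides = imread, imsave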
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins can be found using "
"`skimage.io.plugins()`.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
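# Illustrative usage sketch (not part of scikit-image): with the 'pil' plugin
# loaded, ``call_plugin('imread', 'image.png', plugin='pil')`` dispatches to
# the PIL-backed ``imread``; the file name here is hypothetical.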
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
|
bsd-3-clause
|
xyguo/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
86
|
4092
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
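# order[k] is the index of the MiniBatchKMeans center closest to the k-th
# KMeans center, so both solutions can be drawn with matching colors below.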
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
DCBIA-OrthoLab/ShapeVariationAnalyzer
|
src/py/generatelib/generate_polys.py
|
2
|
10177
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from scipy import stats
import vtk
import os
import argparse
import timeit
import pickle
import random
from imblearn.over_sampling import SMOTE
import matplotlib.pyplot as plt
import pprint
import inputData
from sklearn.decomposition import PCA
import math
import inputData
import glob
import numpy as np
# #############################################################################
# Generate data
parser = argparse.ArgumentParser(description='Shape Variation Analyzer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#parser.add_argument('--model', type=str, help='pickle file with the pca decomposition', required=True)
#parser.add_argument('--shapeDir', type=str, help='Directory with vtk files .vtk', required=True)
parser.add_argument('--dataPath', action='store', dest='dirwithSub', help='folder with subclasses', required=True)
parser.add_argument('--template', help='Sphere template, computed using SPHARM-PDM', type=str, required=True)
parser.add_argument('--train_size', help='train ratio', type=float, default=0.8)
parser.add_argument('--validation_size', help='validation ratio from test data', default=0.5, type=float)
parser.add_argument('--out', dest="pickle_file", help='Pickle file output', default="datasets.pickle", type=str)
def writeData(data_for_training,outputdataPath):
#write data in a vtk file
vtkdirshapes = os.listdir(outputdataPath)
for vtkfilename in vtkdirshapes:
if vtkfilename.endswith((".vtk")):
print("Writing", vtkfilename)
writer = vtk.vtkPolyDataWriter()
writer.SetInput(data_for_training)
writer.SetFileName(os.path.join(outputdataPath, vtkfilename))
writer.Write()
def get_conversion_matrices(geometry):
points_to_cells = np.zeros((geometry.GetNumberOfCells(), geometry.GetNumberOfPoints()))
for cid in range(geometry.GetNumberOfCells()):
pointidlist = vtk.vtkIdList()
geometry.GetCellPoints(cid, pointidlist)
for pid in range(pointidlist.GetNumberOfIds()):
points_to_cells[cid][pointidlist.GetId(pid)] = 1
cells_to_points = np.zeros((geometry.GetNumberOfPoints(), geometry.GetNumberOfCells()))
for pid in range(geometry.GetNumberOfPoints()):
pointidlist = vtk.vtkIdList()
geometry.GetPointCells(pid, pointidlist)
for cid in range(pointidlist.GetNumberOfIds()):
cells_to_points[pid][pointidlist.GetId(cid)] = 1
return points_to_cells, cells_to_points
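# Illustrative sketch (an assumption-laden demo, not used by the pipeline): for a
# mesh containing a single triangle, points_to_cells is a 1x3 matrix of ones and
# cells_to_points is its 3x1 transpose, since the one cell touches every point.
def demo_conversion_matrices_single_triangle():
    pts = vtk.vtkPoints()
    for xyz in [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]:
        pts.InsertNextPoint(*xyz)
    tri = vtk.vtkTriangle()
    for i in range(3):
        tri.GetPointIds().SetId(i, i)
    cells = vtk.vtkCellArray()
    cells.InsertNextCell(tri)
    poly = vtk.vtkPolyData()
    poly.SetPoints(pts)
    poly.SetPolys(cells)
    poly.BuildCells()
    poly.BuildLinks()  # needed so GetPointCells can resolve point-to-cell links
    return get_conversion_matrices(poly)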
def get_normals(vtkclassdict):
inputdata = inputData.inputData()
labels = []
dataset_concatenated = []
# For each class folder, pair its pickled feature dataset with the per-file normals
for folderclass, vtklist in vtkclassdict.items():
try:
with open(folderclass + ".pickle",'rb') as f:
dataset=pickle.load(f)
normal_features = []
for vtkfilename in vtklist:
#We'll load the same files and get the normals
features = inputdata.load_features(vtkfilename, feature_points=["Normals"])
normal_features.append(features)
#The reshaping below collects, per shape, the list of points (all connected points)
#together with the corresponding label, which here is the point normal.
#Note that the entries in the dataset are lists of different sizes.
normal_features = np.array(normal_features)
featshape = np.shape(normal_features)
labels.extend(normal_features.reshape(featshape[0], featshape[1], -1))
dsshape = np.shape(dataset)
dataset_concatenated.extend(dataset.reshape(dsshape[0], dsshape[2], dsshape[3], -1))
except Exception as e:
print('Unable to process', folderclass, ':', e)
raise
# lens = np.array([len(dataset_concatenated[i]) for i in range(len(dataset_concatenated))])
# mask = np.arange(lens.max()) < lens[:,None]
# padded = np.zeros(mask.shape + (3,))
# padded[mask] = np.vstack((dataset_concatenated[:]))
# return np.array(padded), np.array(labels)
return np.array(dataset_concatenated), np.array(labels)
def get_labels(pickle_file):
#get the labels of a dataset and return the labels array together with the feature dataset
#num_classes=len(pickle_file)
#num_shapes = 268 #should be changed!!
labels = []
shape =[]
dataset_concatenated =[]
for label, pickle_file in enumerate(pickle_file):
try:
with open(pickle_file,'rb') as f:
dataset=pickle.load(f)
shape_dataset = np.shape(dataset)
num_shapes_per_group = shape_dataset[0]
l=[label]*num_shapes_per_group
labels.extend(l)
dataset_concatenated.extend(dataset)
except Exception as e:
print('Unable to process', pickle_file,':',e)
raise
features=np.array(dataset_concatenated)
shape_features=np.shape(features)
return features.reshape(-1,shape_features[1]*shape_features[2]), np.array(labels)
def generate_data(pca_model):
#generate data from the PCA decomposition (currently unused)
print("Generating data ...")
pca = pca_model["pca"]
X_ = pca_model["X_"]
X_pca_ = pca_model["X_pca_"]
X_pca_var = pca_model["X_pca_var"]
print('Variance',X_pca_var)
print('Mean',X_pca_)
#between -1 and 1
alpha = 2.0*(np.random.random_sample(np.size(X_pca_))) - 1.0
print('alpha', alpha)
data_compressed = 1.5*X_pca_var * alpha + X_pca_
print('data compressed',data_compressed)
data_generated = pca.inverse_transform(data_compressed) + X_
return data_generated
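# Hedged, self-contained sketch of the sampling idea above, run on random data
# rather than the shape dataset: draw coefficients around the mean PCA scores,
# scaled by the per-component spread, and map them back with inverse_transform.
def demo_generate_from_pca(n_samples=100, n_features=6, n_components=3):
    X = np.random.randn(n_samples, n_features)
    pca = PCA(n_components=n_components).fit(X)
    scores = pca.transform(X)
    alpha = 2.0 * np.random.random_sample(n_components) - 1.0  # in [-1, 1)
    return pca.inverse_transform(scores.mean(axis=0)
                                 + 1.5 * scores.std(axis=0) * alpha)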
def generate_with_SMOTE(dataset,labels):
#generate data with the SMOTE algorithm; it balances the different groups
sm=SMOTE(kind='regular')
print('shape dataset',dataset.shape)
print('shape labels',labels.shape)
dataset_res, labels_res = sm.fit_sample(dataset,labels)
print('shape dataset resampled',np.shape(dataset_res),'shape lables resampled',np.shape(labels_res))
return dataset_res,labels_res
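# Hedged sketch of the resampling step on a synthetic imbalanced problem. It
# assumes the installed imblearn still exposes the fit_sample API used above
# (newer releases call it fit_resample).
def demo_smote_balancing():
    rare = np.random.randn(10, 4) + 3.0
    common = np.random.randn(90, 4)
    X = np.vstack([rare, common])
    y = np.array([1] * 10 + [0] * 90)
    X_res, y_res = generate_with_SMOTE(X, y)
    return np.bincount(y_res.astype(int))  # both classes end up with 90 samples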
def PCA_plot(dataset,labels,dataset_res,labels_res):
#plot the original and the resampled data after a PCA decomposition
pca = PCA(n_components=200)
pca.fit(dataset)
dataset_pca=pca.transform(dataset)
print('original shape: ',dataset.shape)
print('transformed shape:',dataset_pca.shape)
#print('Ratio variance',pca.explained_variance_ratio_)
#plt.scatter(dataset[:,0],dataset[:,1],alpha=0.2)
#dataset_new = pca.inverse_transform(dataset_pca)
plt.figure(2)
plt.subplot(121)
plt.scatter(dataset_pca[:,0],dataset_pca[:,1],edgecolor='none',alpha=0.5,c=labels,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels))[0]))
plt.title('Original data with pca (' + str(dataset.shape[0]) + ' samples)')
#pca.fit(dataset_res)
dataset_res_pca=pca.transform(dataset_res)
plt.subplot(122)
plt.scatter(dataset_res_pca[:,0],dataset_res_pca[:,1],edgecolor='none',alpha=0.5,c=labels_res,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels_res))[0]))
plt.title('Resampled data with pca (' + str(dataset_res_pca.shape[0]) + ' samples)')
for i in range(1,3):
plt.subplot(1,2,i)
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar()
cumsum = np.cumsum(pca.explained_variance_ratio_)
plt.figure(1)
plt.plot(cumsum)
plt.xlabel('nb of components')
plt.ylabel('cumulative explained variance')
plt.axhline(y=0.95, linestyle=':', label='.95 explained', color="#f23e3e")
numcomponents = len(np.where(cumsum < 0.95)[0])
plt.axvline(x=numcomponents, linestyle=':', label=(str(numcomponents) + ' components'), color="#31f9ad")
plt.legend(loc=0)
histo = np.bincount(labels)
histo_range = np.array(range(histo.shape[0]))
plt.figure(3)
plt.bar(histo_range, histo)
plt.xlabel('Groups')
plt.ylabel('Number of samples')
for xy in zip(histo_range, histo):
plt.annotate(xy[1], xy=xy, ha="center", color="#4286f4")
plt.show()
if __name__ == '__main__':
np.set_printoptions(threshold=np.inf)
args = parser.parse_args()
dataPath=args.dirwithSub
pickle_file = args.pickle_file
template = args.template
reader = vtk.vtkPolyDataReader()
reader.SetFileName(template)
reader.Update()
points_to_cells, cells_to_points = get_conversion_matrices(reader.GetOutput())
# Get the data from the folders with vtk files
inputdata = inputData.inputData()
data_folders = inputdata.get_folder_classes_list(dataPath)
pickled_datasets = inputdata.maybe_pickle(data_folders, 5, feature_polys=["Points"])
# Create the labels, i.e., enumerate the groups
vtklistdict = inputdata.get_vtklist(data_folders)
dataset,labels = get_normals(vtklistdict)
# Compute the total number of shapes and the train/test sizes
total_number_shapes=dataset.shape[0]
num_train = int(args.train_size*total_number_shapes)
num_valid = int((total_number_shapes - num_train)*args.validation_size)
# Randomize the original dataset
shuffled_dataset, shuffled_labels = inputdata.randomize(dataset, labels)
dataset_res = shuffled_dataset
labels_res = shuffled_labels
# SANITY CHECKS
print('dataset',np.shape(dataset))
print('labels',np.shape(labels))
print('dataset_res',np.shape(dataset_res))
print('labels_res',np.shape(labels_res))
print('num_train', num_train)
print('num_valid', num_valid)
print('num_test', total_number_shapes - num_valid - num_train)
print("points_to_cells", np.shape(points_to_cells))
print("cells_to_points", np.shape(cells_to_points))
# PCA_plot(dataset,labels,dataset_res,labels_res)
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': dataset_res,
'train_labels': labels_res,
'valid_dataset': dataset[num_train: num_train + num_valid],
'valid_labels': labels[num_train: num_train + num_valid],
'test_dataset': dataset[num_train + num_valid:],
'test_labels': labels[num_train + num_valid:],
'points_to_cells': points_to_cells,
'cells_to_points': cells_to_points
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
|
apache-2.0
|
soulmachine/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
8
|
9532
|
import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_default_load_files():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_w_categories_desc_and_encoding():
try:
setup_load_files()
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_wo_load_content():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
finally:
teardown_load_files()
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread # noqa
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_wine(return_X_y=True)
bunch = load_wine()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
|
bsd-3-clause
|
bundgus/python-playground
|
matplotlib-playground/examples/api/line_with_text.py
|
1
|
1671
|
"""
Show how to override basic methods so an artist can contain another
artist. In this case, the line contains a Text instance to label it.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
class MyLine(lines.Line2D):
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
self.text = mtext.Text(0, 0, '')
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position((x[-1], y[-1]))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
fig, ax = plt.subplots()
x, y = np.random.rand(2, 20)
line = MyLine(x, y, mfc='red', ms=12, label='line label')
#line.text.set_text('line label')
line.text.set_color('red')
line.text.set_fontsize(16)
ax.add_line(line)
plt.show()
|
mit
|
eggplantbren/DNest4
|
python/dnest4/analysis.py
|
1
|
10972
|
# -*- coding: utf-8 -*-
import numpy as np
from .backends import CSVBackend
try:
basestring
except NameError:
stringtype = str
else:
stringtype = basestring
__all__ = ["postprocess", "make_plots"]
def postprocess(backend=None,
temperature=1.0, cut=0, compression_assert=None,
resample_log_X=0,
compression_bias_min=1, compression_scatter=0,
resample=0,
plot=False, plot_params=None):
# Deal with filename inputs.
if backend is None:
backend = "."
if isinstance(backend, stringtype):
backend = CSVBackend(backend)
# Unpack the backend's data.
levels = backend.levels
samples = backend.samples
sample_info = backend.sample_info
# Remove regularisation from levels if we asked for it.
if compression_assert is not None:
levels = np.array(levels)
levels["log_X"][1:] = \
-np.cumsum(compression_assert*np.ones(len(levels) - 1))
# Remove burn-in.
if cut > 0:
samples, sample_info = remove_burnin(samples, sample_info, cut)
# Subsample; one (randomly selected) particle for each time.
if len(sample_info.shape) > 1:
samples, sample_info = subsample_particles(samples, sample_info)
# Check dimensions.
assert len(samples) == len(sample_info), "dimension mismatch"
# Estimate the X values for the samples by interpolating from the levels.
if resample_log_X:
resample_count = resample_log_X
else:
resample_count = 1
log_z = np.empty(resample_count)
h = np.empty(resample_count)
n_eff = np.empty(resample_count)
log_post = np.empty((resample_count, len(sample_info)))
for i in range(resample_count):
# If requested, jitter the Xs of the levels.
if resample_log_X:
levels_2 = np.array(levels)
comp = -np.diff(levels_2["log_X"])
comp *= np.random.uniform(compression_bias_min, 1.0)
comp *= np.exp(compression_scatter*np.random.randn(len(comp)))
levels_2["log_X"][1:] = -comp
levels_2["log_X"] = np.cumsum(levels_2["log_X"])
else:
levels_2 = levels
sample_log_X = interpolate_samples(levels_2, sample_info,
resample=resample_log_X)
if i == 0:
backend.write_sample_log_X(sample_log_X)
log_z[i], h[i], n_eff[i], log_post[i] = compute_stats(
levels_2, sample_info, sample_log_X,
temperature=temperature,
)
# Re-sample the samples using the posterior weights.
log_post = logsumexp(log_post, axis=0) - np.log(resample_count)
backend.write_weights(np.exp(log_post))
if resample:
new_samples = generate_posterior_samples(
samples, log_post, int(resample * np.mean(n_eff))
)
backend.write_posterior_samples(new_samples)
log_post = np.zeros(len(new_samples))
else:
new_samples = samples
# Compute the final stats based on resampling.
stats = dict(
log_Z=np.mean(log_z), log_Z_std=np.std(log_z),
H=np.mean(h), H_std=np.std(h),
N_eff=np.mean(n_eff), N_eff_std=np.std(n_eff),
)
backend.write_stats(stats)
# Make the plots if requested.
if plot:
if plot_params is None:
plot_params = dict()
make_plots(backend, **plot_params)
return stats
def logsumexp(x, axis=None):
mx = np.max(x, axis=axis)
return np.log(np.sum(np.exp(x - mx), axis=axis)) + mx
def remove_burnin(samples, sample_info, nburn):
return (
samples[int(nburn * len(samples)):],
sample_info[int(nburn * len(sample_info)):],
)
def subsample_particles(samples, sample_info):
if len(samples.shape) == 2 and len(sample_info.shape) == 1:
return samples, sample_info
if len(sample_info.shape) != 2:
raise ValueError("invalid dimensions")
# Flatten all particles across iterations; the random one-per-iteration
# selection below is currently disabled.
if samples.shape[1] != sample_info.shape[1]:
raise ValueError("dimension mismatch")
n = np.prod(sample_info.shape)
return samples.reshape((n, -1)), sample_info.reshape(n)
# inds = (
# np.arange(len(samples)),
# np.random.randint(samples.shape[1], size=len(samples)),
# )
# return samples[inds], sample_info[inds]
def interpolate_samples(levels, sample_info, resample=False):
# Work out the level assignments. This looks horrifying because we need
# to take tiebreakers into account; if two levels (or samples) have
# exactly the same likelihood, then the tiebreaker decides the assignment.
lev, order = 0, 0
assign = np.empty(len(sample_info), dtype=int)
argsort = np.empty(len(sample_info), dtype=int)
l_set = zip(levels["log_likelihood"], levels["tiebreaker"],
-np.arange(1, len(levels)+1))
s_set = zip(sample_info["log_likelihood"], sample_info["tiebreaker"],
range(len(sample_info)))
for ll, _, ind in sorted(list(l_set) + list(s_set)):
if ind < 0:
lev = -ind - 1
continue
assign[ind] = lev
argsort[ind] = order
order += 1
# Loop over levels and place the samples within each level.
sample_log_X = np.empty(len(sample_info))
x_min = np.exp(np.append(levels["log_X"][1:], -np.inf))
x_max = np.exp(levels["log_X"])
dx = x_max - x_min
for i, lev in enumerate(levels):
# Use the level assignments to get a mask of sample IDs in the correct
# order.
m = assign == i
inds = np.arange(len(sample_info))[m][np.argsort(argsort[m])]
if resample:
# Re-sample the points uniformly---in X---between the level
# boundaries.
sample_log_X[inds] = np.sort(np.log(
np.random.uniform(x_min[i], x_max[i], size=len(inds))
))[::-1]
else:
# Place the samples uniformly---in X not log(X)---between the
# level boundaries.
N = len(inds)
# FIXME: there are two options here and we're using the backwards
# compatible one but the other might be better. Need to think
# about it further. It won't matter as the number of samples gets
# large.
n = ((np.arange(1, N+1)) / (N+1))[::-1]
# n = ((np.arange(N) + 0.5) / N)[::-1]
sample_log_X[inds] = np.log(x_min[i] + dx[i] * n)
return sample_log_X
def compute_stats(levels, sample_info, sample_log_X, temperature=1.0):
# Use the log(X) estimates for the levels and the samples to estimate
# log(Z) using the trapezoid rule.
log_x = np.append(levels["log_X"], sample_log_X)
log_y = np.append(levels["log_likelihood"], sample_info["log_likelihood"])
samp_inds = np.append(-np.ones(len(levels), dtype=int),
np.arange(len(sample_info)))
is_samp = np.append(
np.zeros(len(levels), dtype=bool),
np.ones(len(sample_info), dtype=bool)
)
inds = np.argsort(log_x)
log_x = log_x[inds]
log_y = log_y[inds] / temperature
samp_inds = samp_inds[inds]
is_samp = is_samp[inds]
# Extend to X=0.
log_x = np.append(-np.inf, log_x)
log_y = np.append(log_y[0], log_y)
# Compute log(exp(L_k+1) + exp(L_k)) using logsumexp rules...
d_log_y = log_y[1:] - log_y[:-1]
log_y_mean = np.log(0.5) + np.log(1+np.exp(d_log_y)) + log_y[:-1]
# ...and log(exp(log(X_k+1)) + exp(log(X_k))) using logsumexp rules.
log_x_diff = np.log(1. - np.exp(log_x[:-1] - log_x[1:])) + log_x[1:]
# Then from the trapezoid rule:
# log(Z) = log(0.5) + logsumexp(log_x_diff + log_y_mean)
log_p = log_x_diff + log_y_mean
log_z = logsumexp(log_p)
log_p -= log_z
# Compute the sample posterior weights. These are equal to:
# w_k = L_k * 0.5 * (X_k+1 - X_k-1) / Z
# but we recompute Z without using the levels, just to be safe.
log_prior = np.log(0.5) + np.logaddexp(log_x_diff[1:], log_x_diff[:-1])
log_post = np.array(sample_info["log_likelihood"])
log_post[samp_inds[samp_inds >= 0]] += log_prior[samp_inds[:-1] >= 0]
log_post -= logsumexp(log_post)
# Compute the information and effective sample size.
h = -log_z + np.sum(np.exp(log_post) * sample_info["log_likelihood"])
n_eff = np.exp(-np.sum(np.exp(log_post)*log_post))
return log_z, h, n_eff, log_post
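# Self-contained sanity sketch (toy numbers, not DNest4 output) of the log-space
# trapezoid rule used in compute_stats:
#     Z ~= sum_k 0.5 * (L_k + L_{k+1}) * (X_k - X_{k+1}),
# evaluated entirely with logaddexp/logsumexp to avoid underflow.
def demo_log_trapezoid():
    log_x = np.log(np.array([1.0, 0.5, 0.25]))  # shrinking prior mass X_k
    log_y = np.array([-3.0, -2.0, -1.0])        # increasing log-likelihoods L_k
    log_y_mean = np.log(0.5) + np.logaddexp(log_y[:-1], log_y[1:])
    log_dx = np.log(np.exp(log_x[:-1]) - np.exp(log_x[1:]))
    return logsumexp(log_y_mean + log_dx)       # equals log of the direct sum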
def generate_posterior_samples(samples, log_weights, N):
w = np.exp(log_weights - logsumexp(log_weights))
inds = np.random.choice(np.arange(len(samples)), size=int(N), p=w)
return samples[inds]
def make_plots(backend):
figs = dict()
figs["levels"] = make_levels_plot(backend)
figs["compression"] = make_compression_plot(backend)
figs["log_X_log_L"] = make_log_X_log_L_plot(backend)
return figs
def make_levels_plot(backend):
import matplotlib.pyplot as pl
fig, ax = pl.subplots(1, 1)
ax.plot(backend.sample_info["level_assignment"], color="k")
ax.set_xlabel("Iterations")
ax.set_ylabel("Level")
return fig
def make_compression_plot(backend):
import matplotlib.pyplot as pl
fig, axes = pl.subplots(2, 1, sharex=True)
levels = backend.levels
ax = axes[0]
ax.plot(np.diff(levels["log_X"]), color="k")
ax.axhline(-1., color="g")
ax.axhline(-np.log(10.), color="g", linestyle="--")
ax.set_ylim(ymax=0.05)
ax.set_ylabel("Compression")
ax = axes[1]
m = levels["tries"] > 0
ax.plot(np.arange(len(levels))[m],
levels[m]["accepts"]/levels[m]["tries"],
"ko-")
ax.set_ylabel("MH Acceptance")
ax.set_xlabel("level")
ax.set_ylim([0.0, 1.0])
return fig
def make_log_X_log_L_plot(backend):
import matplotlib.pyplot as pl
fig, axes = pl.subplots(2, 1, sharex=True)
levels = backend.levels
sample_info = backend.sample_info
sample_log_X = backend.sample_log_X
weights = backend.weights
ax = axes[0]
ax.plot(sample_log_X.flatten(), sample_info["log_likelihood"].flatten(),
"k.", label="Samples")
ax.plot(levels["log_X"][1:], levels["log_likelihood"][1:], "g.",
label="Levels")
ax.legend(numpoints=1, loc="lower left")
ax.set_ylabel("log(L)")
ax.set_title("log(Z) = {0}".format(backend.stats["log_Z"]))
# Use all plotted logl values to set ylim
combined_logl = np.hstack([sample_info["log_likelihood"],\
levels["log_likelihood"][1:]])
combined_logl = np.sort(combined_logl)
lower = combined_logl[int(0.1*combined_logl.size)]
upper = combined_logl[-1]
diff = upper - lower
lower -= 0.05*diff
upper += 0.05*diff
ax.set_ylim([lower, upper])
ax = axes[1]
ax.plot(sample_log_X, weights, "k.")
ax.set_ylabel("posterior weight")
ax.set_xlabel("log(X)")
return fig
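# Hypothetical end-to-end usage of this module; the run directory and the import
# path are placeholders, not taken from the DNest4 documentation:
#
#     from dnest4.analysis import postprocess
#     stats = postprocess(backend="runs/my_run", cut=0.25, plot=True)
#     print(stats["log_Z"], "+/-", stats["log_Z_std"])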
|
mit
|
passoir/trading-with-python
|
sandbox/spreadCalculations.py
|
78
|
1496
|
'''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
|
bsd-3-clause
|
jorik041/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
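# Note (not part of the original example): on newer scikit-learn releases the same
# search is usually written against sklearn.model_selection, roughly:
#
#     from sklearn.model_selection import GridSearchCV, train_test_split
#     clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
#                        scoring='%s_weighted' % score)
#     clf.fit(X_train, y_train)
#     print(clf.best_params_)   # per-fold results live in clf.cv_results_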
|
bsd-3-clause
|
lesteve/clusterlib
|
doc/sphinxext/gen_rst.py
|
3
|
38959
|
"""
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
# Obtained from the scikit-learn package
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import warnings
import glob
import sys
import gzip
import posixpath
import subprocess
from textwrap import dedent
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
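# Hedged usage sketch of extract_docstring on a throwaway file; the temporary
# file is created here for illustration and is not part of the Sphinx build.
def demo_extract_docstring():
    import tempfile
    src = '"""My example\n\nLonger description."""\nimport sys\n'
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write(src)
    docstring, first_par, end_row = extract_docstring(f.name)
    os.remove(f.name)
    return first_par, end_row  # ('My example', line where the code starts)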
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(dir, 'images', 'thumb')):
os.makedirs(os.path.join(dir, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(dir, dir, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (dir, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', dir)
ex_file.write(_thumbnail_div(dir, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
time_m = 0
time_s = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print(e.args)
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
|
bsd-3-clause
|
zhuango/python
|
pandasLearning/featureEng_for_ning.py
|
2
|
4108
|
import numpy as np
import pandas as pd
from math import *
def getMinDistances(tasks, vips):
distances = []
for longitude, latitude in zip(tasks['任务gps经度'], tasks['任务gps纬度']):
minDistance = 100000000000000
for longVip, latiVip in zip(vips['会员gps经度'], vips['会员gps纬度']):
# print(longitude, latitude, longVip, latiVip)
dis = cal_dist(longitude, latitude, longVip, latiVip)
if minDistance > dis:
minDistance = dis
distances.append(minDistance)
return distances
def getMinute(timeStr):
"""6:30:00"""
items = [int(elem) for elem in timeStr.split(":")]
minutes = items[0] * 60 + items[1]
return minutes
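# Illustrative check (assumed example, not part of the original script):
# getMinute('6:30:00') -> 6 * 60 + 30 = 390 minutes past midnight; the seconds
# field is ignored.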
def cal_dist(lon1, lat1, lon2, lat2): # lon1, lat1, lon2, lat2 (decimal degrees)
"""
Compute the great-circle distance between points A and B from their longitudes and latitudes.
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371 # mean radius of the Earth in kilometers
return c * r * 1000
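# Rough sanity check (hypothetical coordinates, not from the dataset): the
# haversine distance between Beijing (116.40E, 39.90N) and Shanghai
# (121.47E, 31.23N) comes out to roughly 1.07e6 meters, since c * r is in
# kilometers and is scaled by 1000 above, e.g.:
# print(cal_dist(116.40, 39.90, 121.47, 31.23))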
def addVipAroundCount(tasks, vips, thres=0.324208925969, ss='sss'):
vipsAroundCounts = []
averageBeginningTimes = []
averageCredits = []
averageLimit = []
for longitude, latitude in zip(tasks['任务gps经度'], tasks['任务gps纬度']):
count = 0
totalTime = 0
totalCredit = 0
totalLimit = 0
for longVip, latiVip, timeStr, credit, limit in zip(vips['会员gps经度'], vips['会员gps纬度'], vips['预订任务开始时间'], vips['信誉值'], vips['预订任务限额']):
# print(longitude, latitude, longVip, latiVip)
dis = cal_dist(longitude, latitude, longVip, latiVip)
time = getMinute(str(timeStr))
if dis <= thres:
count += 1
totalTime += time
totalCredit += credit
totalLimit += limit
vipsAroundCounts.append(count)
if count == 0:
averageBeginningTimes.append(getMinute('8:00:00'))
averageCredits.append(0.0)
averageLimit.append(0.0)
else:
averageBeginningTimes.append(totalTime / count)
averageCredits.append(totalCredit / count)
averageLimit.append(totalLimit / count)
tasks['vip_count_around_' + str(ss)] = vipsAroundCounts
# tasks['averaged_begining_time_' + str(thres)] = averageBeginningTimes
tasks['averaged_credit_' + str(ss)] = averageCredits
tasks['averaged_limit' + str(ss)] = averageLimit
return vipsAroundCounts,
def factorizeLocation(tasks):
tasks['位置_factorized'] = pd.factorize(tasks['位置'])[0]
def mapLocation(tasks, loc2id):
tasks['位置_factorized'] = [loc2id[loc.strip()] for loc in tasks['位置']]
def buildLocationDict(filename):
loc2id = {}
locations = pd.read_csv(filename)
for location, location_id in zip(locations['位置'], locations['位置_factorized']):
loc2id[location] = location_id
return loc2id
# tasks columns: 任务号码, 任务gps经度, 任务gps纬度, 位置, 任务标价
#   (task id, task gps longitude, task gps latitude, location, task price)
# vips columns: 会员编号, 会员gps经度, 会员gps纬度, 预订任务限额, 预订任务开始时间, 信誉值
#   (member id, member gps longitude, member gps latitude, booking quota, booking start time, credit rating)
tasks = pd.read_csv('/home/laboratory/Desktop/math/q4.csv', header=0)
print(tasks.info())
vips = pd.read_csv('vips.csv', header=0)
print(vips.info())
distances = getMinDistances(tasks, vips)
addVipAroundCount(tasks, vips, 33934.34627910165, 33934.34627910165)
addVipAroundCount(tasks, vips, 16967.173139550825, 16967.173139550825)
# factorizeLocation(tasks)
# loc2id = buildLocationDict('/home/laboratory/Desktop/math/featured_tasks.csv')
# print(len(loc2id))
# for loc in loc2id:
# print("{},{}".format(loc, loc2id[loc]))
# mapLocation(tasks, loc2id)
tasks.to_excel('./ning/featured_tasks_ning_q4.xls', index=False)
tasks.to_csv('./ning/featured_tasks_ning_q4.csv', index=False)
|
gpl-2.0
|
tosolveit/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
56
|
37976
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
|
bsd-3-clause
|
Jimmy-Morzaria/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
242
|
5885
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2 is bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
tbenthompson/tectosaur
|
examples/check_RH3.py
|
1
|
4161
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import tectosaur.mesh.find_near_adj as find_near_adj
from tectosaur.constraint_builders import continuity_constraints, \
free_edge_constraints, all_bc_constraints
from tectosaur.ops.sparse_integral_op import SparseIntegralOp
from tectosaur.ops.sparse_farfield_op import PtToPtDirectFarfieldOp, \
PtToPtFMMFarfieldOp, TriToTriDirectFarfieldOp
from tectosaur.ops.dense_integral_op import DenseIntegralOp
from tectosaur.ops.mass_op import MassOp
from tectosaur.ops.composite_op import CompositeOp
from tectosaur.ops.sum_op import SumOp
from tectosaur.util.timer import Timer
from okada import Okada, build_constraints, abs_fault_slip
import solve
from tectosaur.util.logging import setup_root_logger
logger = setup_root_logger(__name__)
def build_and_solve_T(data):
allow_nearfield = True
near_threshold = 2.0
if not allow_nearfield:
if any_nearfield(
data.all_mesh[0], data.all_mesh[1],
data.surf_tri_idxs, data.fault_tri_idxs,
near_threshold
):
raise Exception("nearfield interactions not allowed!")
else:
print('good. all interactions are farfield.')
cs = build_constraints(
data.surface_tris, data.fault_tris, data.all_mesh[0],
abs_fault_slip
)
op_type = SparseIntegralOp
# op_type = DenseIntegralOp
T_op = SparseIntegralOp(
6, 2, 5, near_threshold,
'elasticT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
data.float_type,
farfield_op_type = PtToPtFMMFarfieldOp(150, 3.0, 450),
obs_subset = data.surf_tri_idxs,
src_subset = data.surf_tri_idxs,
)
mass_op = MassOp(3, data.all_mesh[0], data.all_mesh[1])
T_op_fault_to_surf = SparseIntegralOp(
6, 2, 5, near_threshold,
'elasticT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
data.float_type,
farfield_op_type = PtToPtDirectFarfieldOp,
obs_subset = data.surf_tri_idxs,
src_subset = data.fault_tri_idxs,
)
def replace_K_name(*args):
args = list(args)
args[1] = 'elasticRT3'
return TriToTriDirectFarfieldOp(*args)
# args[1] = 'elasticT3'
# return PtToPtDirectFarfieldOp(*args)
# return TriToTriDirectFarfieldOp(*args)
T_op_fault_to_surf2 = DenseIntegralOp(
6, 2, 10, near_threshold,
'elasticRT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
data.float_type,
obs_subset = data.surf_tri_idxs,
src_subset = data.fault_tri_idxs,
)
# T_op_fault_to_surf2 = SparseIntegralOp(
# 6, 2, 5, near_threshold,
# 'elasticRT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
# data.float_type,
# farfield_op_type = replace_K_name,
# obs_subset = data.surf_tri_idxs,
# src_subset = data.fault_tri_idxs,
# )
# T_op_fault_to_surf2 = TriToTriDirectFarfieldOp(
# 2, 'elasticRT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
# data.float_type, obs_subset = data.surf_tri_idxs,
# src_subset = data.fault_tri_idxs
# )
slip = get_fault_slip(data.all_mesh[0], data.fault_tris).reshape(-1)
A = T_op_fault_to_surf.dot(slip).reshape((-1,3,3))
B = T_op_fault_to_surf2.dot(slip).reshape((-1,3,3))
ratio = A / B
import ipdb
ipdb.set_trace()
iop = CompositeOp(
(mass_op, 0, 0),
(T_op, 0, 0),
(T_op_fault_to_surf, 0, data.n_surf_dofs),
shape = (data.n_dofs, data.n_dofs)
)
iop2 = CompositeOp(
(mass_op, 0, 0),
(T_op, 0, 0),
(T_op_fault_to_surf2, 0, data.n_surf_dofs),
shape = (data.n_dofs, data.n_dofs)
)
return (
solve.iterative_solve(iop, cs, tol = 1e-6),
solve.iterative_solve(iop2, cs, tol = 1e-6)
)
def main():
obj = Okada(21, 10, top_depth = -0.2)
soln, soln2 = obj.run(build_and_solve = build_and_solve_T)
# okada_soln = obj.okada_exact()
obj.xsec_plot([soln, soln2], okada_soln = None)
if __name__ == "__main__":
main()
|
mit
|
apache/spark
|
python/pyspark/pandas/tests/test_series_string.py
|
15
|
13727
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
import re
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesStringTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series(
[
"apples",
"Bananas",
"carrots",
"1",
"100",
"",
"\nleading-whitespace",
"trailing-Whitespace \t",
None,
np.NaN,
]
)
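    # The two helpers below run the same callable against a pandas-on-Spark Series
    # and the equivalent plain pandas Series and assert that the results agree
    # (only approximately when almost=True).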
def check_func(self, func, almost=False):
self.check_func_on_series(func, self.pser, almost=almost)
def check_func_on_series(self, func, pser, almost=False):
self.assert_eq(func(ps.from_pandas(pser)), func(pser), almost=almost)
def test_string_add_str_num(self):
pdf = pd.DataFrame(dict(col1=["a"], col2=[1]))
psdf = ps.from_pandas(pdf)
with self.assertRaises(TypeError):
psdf["col1"] + psdf["col2"]
def test_string_add_assign(self):
pdf = pd.DataFrame(dict(col1=["a", "b", "c"], col2=["1", "2", "3"]))
psdf = ps.from_pandas(pdf)
psdf["col1"] += psdf["col2"]
pdf["col1"] += pdf["col2"]
self.assert_eq(psdf["col1"], pdf["col1"])
def test_string_add_str_str(self):
pdf = pd.DataFrame(dict(col1=["a", "b", "c"], col2=["1", "2", "3"]))
psdf = ps.from_pandas(pdf)
# TODO: Fix the Series names
self.assert_eq(psdf["col1"] + psdf["col2"], pdf["col1"] + pdf["col2"])
self.assert_eq(psdf["col2"] + psdf["col1"], pdf["col2"] + pdf["col1"])
def test_string_add_str_lit(self):
pdf = pd.DataFrame(dict(col1=["a", "b", "c"]))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["col1"] + "_lit", pdf["col1"] + "_lit")
self.assert_eq("_lit" + psdf["col1"], "_lit" + pdf["col1"])
def test_string_capitalize(self):
self.check_func(lambda x: x.str.capitalize())
def test_string_title(self):
self.check_func(lambda x: x.str.title())
def test_string_lower(self):
self.check_func(lambda x: x.str.lower())
def test_string_upper(self):
self.check_func(lambda x: x.str.upper())
def test_string_swapcase(self):
self.check_func(lambda x: x.str.swapcase())
def test_string_startswith(self):
pattern = "car"
self.check_func(lambda x: x.str.startswith(pattern))
self.check_func(lambda x: x.str.startswith(pattern, na=False))
def test_string_endswith(self):
pattern = "s"
self.check_func(lambda x: x.str.endswith(pattern))
self.check_func(lambda x: x.str.endswith(pattern, na=False))
def test_string_strip(self):
self.check_func(lambda x: x.str.strip())
self.check_func(lambda x: x.str.strip("es\t"))
self.check_func(lambda x: x.str.strip("1"))
def test_string_lstrip(self):
self.check_func(lambda x: x.str.lstrip())
self.check_func(lambda x: x.str.lstrip("\n1le"))
self.check_func(lambda x: x.str.lstrip("s"))
def test_string_rstrip(self):
self.check_func(lambda x: x.str.rstrip())
self.check_func(lambda x: x.str.rstrip("\t ec"))
self.check_func(lambda x: x.str.rstrip("0"))
def test_string_get(self):
self.check_func(lambda x: x.str.get(6))
self.check_func(lambda x: x.str.get(-1))
def test_string_isalnum(self):
self.check_func(lambda x: x.str.isalnum())
def test_string_isalpha(self):
self.check_func(lambda x: x.str.isalpha())
def test_string_isdigit(self):
self.check_func(lambda x: x.str.isdigit())
def test_string_isspace(self):
self.check_func(lambda x: x.str.isspace())
def test_string_islower(self):
self.check_func(lambda x: x.str.islower())
def test_string_isupper(self):
self.check_func(lambda x: x.str.isupper())
def test_string_istitle(self):
self.check_func(lambda x: x.str.istitle())
def test_string_isnumeric(self):
self.check_func(lambda x: x.str.isnumeric())
def test_string_isdecimal(self):
self.check_func(lambda x: x.str.isdecimal())
def test_string_cat(self):
psser = ps.from_pandas(self.pser)
with self.assertRaises(NotImplementedError):
psser.str.cat()
def test_string_center(self):
self.check_func(lambda x: x.str.center(0))
self.check_func(lambda x: x.str.center(10))
self.check_func(lambda x: x.str.center(10, "x"))
def test_string_contains(self):
self.check_func(lambda x: x.str.contains("le", regex=False))
self.check_func(lambda x: x.str.contains("White", case=True, regex=False))
self.check_func(lambda x: x.str.contains("apples|carrots", regex=True))
self.check_func(lambda x: x.str.contains("BANANAS", flags=re.IGNORECASE, na=False))
def test_string_count(self):
self.check_func(lambda x: x.str.count("wh|Wh"))
self.check_func(lambda x: x.str.count("WH", flags=re.IGNORECASE))
def test_string_decode(self):
psser = ps.from_pandas(self.pser)
with self.assertRaises(NotImplementedError):
psser.str.decode("utf-8")
def test_string_encode(self):
psser = ps.from_pandas(self.pser)
with self.assertRaises(NotImplementedError):
psser.str.encode("utf-8")
def test_string_extract(self):
psser = ps.from_pandas(self.pser)
with self.assertRaises(NotImplementedError):
psser.str.extract("pat")
def test_string_extractall(self):
psser = ps.from_pandas(self.pser)
with self.assertRaises(NotImplementedError):
psser.str.extractall("pat")
def test_string_find(self):
self.check_func(lambda x: x.str.find("a"))
self.check_func(lambda x: x.str.find("a", start=3))
self.check_func(lambda x: x.str.find("a", start=0, end=1))
def test_string_findall(self):
self.check_func_on_series(lambda x: x.str.findall("es|as").apply(str), self.pser[:-1])
self.check_func_on_series(
lambda x: x.str.findall("wh.*", flags=re.IGNORECASE).apply(str), self.pser[:-1]
)
def test_string_index(self):
pser = pd.Series(["tea", "eat"])
self.check_func_on_series(lambda x: x.str.index("ea"), pser)
with self.assertRaises(Exception):
self.check_func_on_series(lambda x: x.str.index("ea", start=0, end=2), pser)
with self.assertRaises(Exception):
self.check_func(lambda x: x.str.index("not-found"))
def test_string_join(self):
pser = pd.Series([["a", "b", "c"], ["xx", "yy", "zz"]])
self.check_func_on_series(lambda x: x.str.join("-"), pser)
self.check_func(lambda x: x.str.join("-"))
def test_string_len(self):
self.check_func(lambda x: x.str.len())
pser = pd.Series([["a", "b", "c"], ["xx"], []])
self.check_func_on_series(lambda x: x.str.len(), pser)
def test_string_ljust(self):
self.check_func(lambda x: x.str.ljust(0))
self.check_func(lambda x: x.str.ljust(10))
self.check_func(lambda x: x.str.ljust(30, "x"))
def test_string_match(self):
self.check_func(lambda x: x.str.match("in"))
self.check_func(lambda x: x.str.match("apples|carrots", na=False))
self.check_func(lambda x: x.str.match("White", case=True))
self.check_func(lambda x: x.str.match("BANANAS", flags=re.IGNORECASE))
def test_string_normalize(self):
self.check_func(lambda x: x.str.normalize("NFC"))
self.check_func(lambda x: x.str.normalize("NFKD"))
def test_string_pad(self):
self.check_func(lambda x: x.str.pad(10))
self.check_func(lambda x: x.str.pad(10, side="both"))
self.check_func(lambda x: x.str.pad(10, side="right", fillchar="-"))
def test_string_partition(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.str.partition())
def test_string_repeat(self):
self.check_func(lambda x: x.str.repeat(repeats=3))
with self.assertRaises(TypeError):
self.check_func(lambda x: x.str.repeat(repeats=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
def test_string_replace(self):
self.check_func(lambda x: x.str.replace("a.", "xx", regex=True))
self.check_func(lambda x: x.str.replace("a.", "xx", regex=False))
self.check_func(lambda x: x.str.replace("ing", "0", flags=re.IGNORECASE))
# reverse every lowercase word
repl = lambda m: m.group(0)[::-1]
self.check_func(lambda x: x.str.replace(r"[a-z]+", repl))
# compiled regex with flags
regex_pat = re.compile(r"WHITESPACE", flags=re.IGNORECASE)
self.check_func(lambda x: x.str.replace(regex_pat, "---"))
def test_string_rfind(self):
self.check_func(lambda x: x.str.rfind("a"))
self.check_func(lambda x: x.str.rfind("a", start=3))
self.check_func(lambda x: x.str.rfind("a", start=0, end=1))
def test_string_rindex(self):
pser = pd.Series(["teatea", "eateat"])
self.check_func_on_series(lambda x: x.str.rindex("ea"), pser)
with self.assertRaises(Exception):
self.check_func_on_series(lambda x: x.str.rindex("ea", start=0, end=2), pser)
with self.assertRaises(Exception):
self.check_func(lambda x: x.str.rindex("not-found"))
def test_string_rjust(self):
self.check_func(lambda x: x.str.rjust(0))
self.check_func(lambda x: x.str.rjust(10))
self.check_func(lambda x: x.str.rjust(30, "x"))
def test_string_rpartition(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.str.rpartition())
def test_string_slice(self):
self.check_func(lambda x: x.str.slice(start=1))
self.check_func(lambda x: x.str.slice(stop=3))
self.check_func(lambda x: x.str.slice(step=2))
self.check_func(lambda x: x.str.slice(start=0, stop=5, step=3))
def test_string_slice_replace(self):
self.check_func(lambda x: x.str.slice_replace(1, repl="X"))
self.check_func(lambda x: x.str.slice_replace(stop=2, repl="X"))
self.check_func(lambda x: x.str.slice_replace(start=1, stop=3, repl="X"))
def test_string_split(self):
self.check_func_on_series(lambda x: repr(x.str.split()), self.pser[:-1])
self.check_func_on_series(lambda x: repr(x.str.split(r"p*")), self.pser[:-1])
pser = pd.Series(["This is a sentence.", "This-is-a-long-word."])
self.check_func_on_series(lambda x: repr(x.str.split(n=2)), pser)
self.check_func_on_series(lambda x: repr(x.str.split(pat="-", n=2)), pser)
self.check_func_on_series(lambda x: x.str.split(n=2, expand=True), pser, almost=True)
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.str.split(expand=True))
def test_string_rsplit(self):
self.check_func_on_series(lambda x: repr(x.str.rsplit()), self.pser[:-1])
self.check_func_on_series(lambda x: repr(x.str.rsplit(r"p*")), self.pser[:-1])
pser = pd.Series(["This is a sentence.", "This-is-a-long-word."])
self.check_func_on_series(lambda x: repr(x.str.rsplit(n=2)), pser)
self.check_func_on_series(lambda x: repr(x.str.rsplit(pat="-", n=2)), pser)
self.check_func_on_series(lambda x: x.str.rsplit(n=2, expand=True), pser, almost=True)
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.str.rsplit(expand=True))
def test_string_translate(self):
m = str.maketrans({"a": "X", "e": "Y", "i": None})
self.check_func(lambda x: x.str.translate(m))
def test_string_wrap(self):
self.check_func(lambda x: x.str.wrap(5))
self.check_func(lambda x: x.str.wrap(5, expand_tabs=False))
self.check_func(lambda x: x.str.wrap(5, replace_whitespace=False))
self.check_func(lambda x: x.str.wrap(5, drop_whitespace=False))
self.check_func(lambda x: x.str.wrap(5, break_long_words=False))
self.check_func(lambda x: x.str.wrap(5, break_on_hyphens=False))
def test_string_zfill(self):
self.check_func(lambda x: x.str.zfill(10))
def test_string_get_dummies(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.str.get_dummies())
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_series_string import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
mackelab/autohmm
|
autohmm/utils.py
|
2
|
4939
|
from __future__ import division, print_function, absolute_import
import time
import sys
from collections import deque
import numpy as np
# __repr__ below relies on a _pprint helper that older scikit-learn releases
# expose as sklearn.base._pprint (the hmmlearn code this monitor adapts imports
# it from there); without it, repr() of the monitor raises a NameError.
from sklearn.base import _pprint
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or the log probability
improvement between the two consecutive iterations is less
than threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
Note
----
The convergence monitor is adapted from hmmlearn.base.
"""
fmt = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, n_iter_min, verbose):
self.tol = tol
self.n_iter = n_iter
self.n_iter_min = n_iter_min
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 1
def __repr__(self):
class_name = self.__class__.__name__
params = dict(vars(self), history=list(self.history))
return "{0}({1})".format(
class_name, _pprint(params, offset=len(class_name)))
def report(self, logprob):
"""Reports the log probability of the next iteration."""
if self.history and self.verbose:
delta = logprob - self.history[-1]
message = self.fmt.format(
iter=self.iter, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
"""``True`` if the EM-algorithm converged and ``False`` otherwise."""
has_converged = False
if self.iter < self.n_iter_min:
return has_converged
if len(self.history) == 2:
diff = self.history[1] - self.history[0]
absdiff = abs(diff)
if diff < 0:
if self.verbose:
print('Warning: LL did decrease', file=sys.stderr)
has_converged = True
if absdiff < self.tol:
if self.verbose:
print('Converged, |difference| is: {}'.format(absdiff))
has_converged = True
if self.iter == self.n_iter:
if self.verbose:
print('Warning: Maximum iterations reached', file=sys.stderr)
has_converged = True
return has_converged
class Timer(object):
"""Helper class to time performance"""
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tenter = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print('{}: '.format(self.name))
print('Elapsed: {}'.format((time.time() - self.tenter)))
def gamma_prior_params(mean_gamma_prior, var_gamma_prior):
"""Returns ``weight`` and ``prior`` for a gamma prior with mean and var"""
# mean: alpha / beta, var: alpha / beta**2
beta = mean_gamma_prior / var_gamma_prior
alpha = mean_gamma_prior * beta
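    # i.e. beta = mean/var and alpha = mean**2/var, which recovers the requested
    # mean and variance for a Gamma(shape=alpha, rate=beta) distribution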
weight = alpha
prior = np.array((beta,beta))
return weight, prior
def sequence_to_rects(seq=None, y=-5, height=10,
colors = ['0.2','0.4', '0.6', '0.7']):
"""Transforms a state sequence to rects for plotting with matplotlib.
Parameters
----------
seq : array
state sequence
y : int
lower left corner
height: int
height
colors : array
array of label colors
Returns
-------
rects : dict
.xy : tuple
(x,y) tuple specifying the lower left
.width: int
width of rect
.height : int
height of rect
.label : int
state label
.color : string
color string
"""
y_ = y
height_ = height
label_ = seq[0]
x_ = -0.5
width_ = 1.0
rects = []
for s in range(1,len(seq)):
if seq[s] != seq[s-1] or s == len(seq)-1:
rects.append({'xy': (x_, y_),
'width': width_,
'height': height_,
'label': int(label_),
'color': colors[int(label_)]})
x_ = s-0.5
width_ = 1.0
label_ = seq[s]
else:
if s == len(seq)-2:
width_ += 2.0
else:
width_ += 1.0
return rects
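# A minimal usage sketch of the helpers above (illustrative numbers only),
# guarded so it runs only when this module is executed directly:
if __name__ == "__main__":
    monitor = ConvergenceMonitor(tol=1e-4, n_iter=10, n_iter_min=2, verbose=True)
    for logprob in [-120.0, -110.0, -109.99995]:
        monitor.report(logprob)   # logs per-iteration progress to stderr
        if monitor.converged:
            break
    with Timer('demo loop'):      # prints the elapsed time on exit
        sum(i * i for i in range(100000))
    print(sequence_to_rects(seq=[0, 0, 1, 1, 2]))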
|
bsd-2-clause
|
joelgrus/data-science-from-scratch
|
first-edition/code/gradient_descent.py
|
53
|
5895
|
from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
|
mit
|
optimizers/nlpy
|
nlpy/optimize/solvers/nlpy_regqp.py
|
3
|
6275
|
#!/usr/bin/env python
from nlpy import __version__
from nlpy.model import SlackFramework
from nlpy.optimize.solvers.cqp import RegQPInteriorPointSolver
from nlpy.optimize.solvers.cqp import RegQPInteriorPointSolver3x3
from nlpy.tools.norms import norm2
from nlpy.tools.timing import cputime
from optparse import OptionParser
import numpy
import os
import sys
import logging
# Create root logger.
log = logging.getLogger('cqp')
log.setLevel(logging.INFO)
fmt = logging.Formatter('%(name)-10s %(levelname)-8s %(message)s')
hndlr = logging.StreamHandler(sys.stdout)
hndlr.setFormatter(fmt)
log.addHandler(hndlr)
# Configure the solver logger.
sublogger = logging.getLogger('cqp.solver')
sublogger.setLevel(logging.INFO)
sublogger.addHandler(hndlr)
sublogger.propagate = False
usage_msg = """%prog [options] problem1 [... problemN]
where problem1 through problemN represent convex quadratic programs."""
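# Example invocation (problem file names are hypothetical):
#     python nlpy_regqp.py --tol 1e-8 --3x3 problem1.nl problem2.nl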
# Define formats for output table.
hdrfmt = '%-15s %5s %15s %7s %7s %7s %6s %6s %4s'
hdr = hdrfmt % ('Name', 'Iter', 'Objective', 'pResid', 'dResid',
'Gap', 'Setup', 'Solve', 'Stat')
fmt = '%-15s %5d %15.8e %7.1e %7.1e %7.1e %6.2f %6.2f %4s'
# Define allowed command-line options
parser = OptionParser(usage=usage_msg, version='%prog version ' + __version__)
# File name options
parser.add_option("-i", "--iter", action="store", type="int", default=None,
dest="maxiter", help="Specify maximum number of iterations")
parser.add_option("-t", "--tol", action="store", type="float", default=None,
dest="tol", help="Specify relative stopping tolerance")
parser.add_option("-p", "--regpr", action="store", type="float", default=None,
dest="regpr", help="Specify initial primal regularization parameter")
parser.add_option("-d", "--regdu", action="store", type="float", default=None,
dest="regdu", help="Specify initial dual regularization parameter")
parser.add_option("-S", "--no-scale", action="store_true",
dest="no_scale", default=False, help="Turn off problem scaling")
parser.add_option("-3", "--3x3", action="store_true",
dest="sys3x3", default=False, help="Use 3x3 block linear system")
parser.add_option("-l", "--long-step", action="store_true", default=False,
dest="longstep", help="Use long-step method")
parser.add_option("-f", "--assume-feasible", action="store_true",
default=False, dest="assume_feasible",
help="Deactivate infeasibility check")
parser.add_option("-V", "--verbose", action="store_true", default=False,
dest="verbose", help="Set verbose mode")
# Parse command-line options
(options, args) = parser.parse_args()
# Decide which class to instantiate.
if options.sys3x3:
print 'Brave man! Using 3x3 block system!'
Solver = RegQPInteriorPointSolver3x3
else:
Solver = RegQPInteriorPointSolver
opts_init = {}
if options.regpr is not None:
opts_init['regpr'] = options.regpr
if options.regdu is not None:
opts_init['regdu'] = options.regdu
opts_solve = {}
if options.maxiter is not None:
opts_solve['itermax'] = options.maxiter
if options.tol is not None:
opts_solve['tolerance'] = options.tol
# Set printing standards for arrays.
numpy.set_printoptions(precision=3, linewidth=70, threshold=10, edgeitems=2)
multiple_problems = len(args) > 1
if not options.verbose:
log.info(hdr)
log.info('-'*len(hdr))
for probname in args:
t_setup = cputime()
qp = SlackFramework(probname)
t_setup = cputime() - t_setup
# isqp() should be implemented in the near future.
#if not qp.isqp():
# log.info('Problem %s is not a quadratic program\n' % probname)
# qp.close()
# continue
# Pass problem to RegQP.
regqp = Solver(qp,
scale=not options.no_scale,
verbose=options.verbose,
**opts_init)
regqp.solve(PredictorCorrector=not options.longstep,
check_infeasible=not options.assume_feasible,
**opts_solve)
# Display summary line.
probname=os.path.basename(probname)
if probname[-3:] == '.nl': probname = probname[:-3]
if not options.verbose:
log.info(fmt % (probname, regqp.iter, regqp.obj_value,
regqp.pResid, regqp.dResid, regqp.rgap,
t_setup, regqp.solve_time, regqp.short_status))
if regqp.short_status == 'degn':
log.info(' F') # Could not regularize sufficiently.
qp.close()
log.info('-'*len(hdr))
if not multiple_problems:
x = regqp.x[:qp.original_n]
log.info('Final x: %s, |x| = %7.1e' % (repr(x),norm2(x)))
log.info('Final y: %s, |y| = %7.1e' % (repr(regqp.y),norm2(regqp.y)))
log.info('Final z: %s, |z| = %7.1e' % (repr(regqp.z),norm2(regqp.z)))
log.info(regqp.status)
log.info('#Iterations: %-d' % regqp.iter)
log.info('RelResidual: %7.1e' % regqp.kktResid)
log.info('Final cost : %21.15e' % regqp.obj_value)
log.info('Setup time : %6.2fs' % t_setup)
log.info('Solve time : %6.2fs' % regqp.solve_time)
# Plot linear system statistics.
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca()
ax.semilogy(regqp.lres_history)
ax.set_title('LS Relative Residual')
fig2 = plt.figure()
ax2 = fig2.gca()
ax2.semilogy(regqp.derr_history)
ax2.set_title('Direct Error Estimate')
fig3 = plt.figure()
ax3 = fig3.gca()
ax3.semilogy([cond[0] for cond in regqp.cond_history], label='K1')
ax3.semilogy([cond[1] for cond in regqp.cond_history], label='K2')
ax3.legend(loc='upper left')
ax3.set_title('Condition number estimates of Arioli, Demmel, Duff')
fig4 = plt.figure()
ax4 = fig4.gca()
ax4.semilogy([berr[0] for berr in regqp.berr_history], label='bkwrd err1')
ax4.semilogy([berr[1] for berr in regqp.berr_history], label='bkwrd err2')
ax4.legend(loc='upper left')
ax4.set_title('Backward Error Estimates of Arioli, Demmel, Duff')
fig5 = plt.figure()
ax5 = fig5.gca()
ax5.semilogy([nrm[0] for nrm in regqp.nrms_history], label='Matrix norm')
ax5.semilogy([nrm[1] for nrm in regqp.nrms_history], label='Solution norm')
ax5.legend(loc='upper left')
ax5.set_title('Infinity Norm Estimates')
plt.show()
|
gpl-3.0
|
vansky/meg_playground
|
scripts/meg_frequency_scanner.py
|
1
|
20480
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Imports
# =======
# <codecell>
#disable autosave functionality;
#This script is taxing enough, and autosaving tends to push it over the edge
# plus, autosaving seems to zero out the file before restoring it from the backup
# this means an autosave causing a crash will actually delete the file rather than saving it!!!
#%autosave 0
# <codecell>
#basic imports
#%pylab inline
import time
import pickle
import logging as L
L.basicConfig(level=L.ERROR) # INFO)
import time
import numpy
import scipy.stats
import os
#import pylab
import sklearn
import scipy
import sklearn.linear_model
import sys
import re
#pylab.rcParams['figure.figsize'] = 10,10 #change the default image size for this session
#pylab.ion()
# <codecell>
#custom imports
#%cd ../scripts
# brian's prototype routines
from protoMEEGutils import *
import protoSpectralWinFFTMapper as specfft
# <markdowncell>
# Definitions
# ===========
# <codecell>
# OUTLINE, 19th Nov 2014
#
# script for initial "signs of life" analysis of single MEG
#
# load in a meg file in EEGlab format
# load in the word properties
# choose a "languagey" channel (left-temporal, clean, with expected ERF patterns)
# plot some ERFs (e.g. all nouns vs all preps) as sanity check
# do the earlier analysis of R^2 and betas due to lexical access vars, up to X words back
# hand over to Marten, so he can do the analysis based on syntactic embedding...
#### SUBROUTINES ####
# plot the time-scale for ERFs and other epoch figures
#def commonPlotProps():
#zeroSample = (abs(epochStart)/float(epochLength)*epochNumTimepoints)
#pylab.plot((0,epochNumTimepoints),(0,0),'k--')
#pylab.ylim((-2.5e-13,2.5e-13)) #((-5e-14,5e-14)) # better way just to get the ones it choose itself?
#pylab.plot((zeroSample,zeroSample),(0,0.01),'k--')
# pylab.xticks(numpy.linspace(0,epochNumTimepoints,7),epochStart+(numpy.linspace(0,epochNumTimepoints,7)/samplingRate))
# pylab.xlabel('time (s) relative to auditory onset') #+refEvent)
# pylab.xlim((62,313))
# pylab.show()
# pylab.axhline(0, color='k', linestyle='--')
# pylab.axvline(125, color='k', linestyle='--')
# adjust R2 down for the artificial inflation you get by increasing the number of explanatory features
def adjustR2(R2, numFeatures, numSamples):
#1/0
#return R2
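    # equivalently, the textbook adjusted R^2:
    #   R2_adj = 1 - (1 - R2) * (numSamples - 1) / (numSamples - numFeatures - 1),
    # which expands to exactly the expression returned below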
return R2-(1-R2)*(float(numFeatures)/(numSamples-numFeatures-1))
# normalise (z-scale) the scale of variables (for the explanatory ones, so the magnitude of beta values are comparably interpretable)
def mynormalise(A):
A = scipy.stats.zscore(A)
A[numpy.isnan(A)] = 0
return A
# <markdowncell>
# Preprocessing
# =============
# <markdowncell>
# Input Params
# ----------
# <codecell>
#change to the data directory to load in the data
#%cd ../MEG_data
#*# MARTY #*# choose a file - I found participant V to be pretty good, and 0.01 to 50Hz filter is pretty conservative #*#
(megFileTag1, megFile1) = ('V_TSSS_0.01-50Hz_@125', '../MEG_data/v_hod_allRuns_tsss_audiobookPrepro_stPad1_lp50_resamp125_frac10ICAed.set')#_hp0.010000.set')
(megFileTag2, megFile2) = ('A_TSSS_0.01-50Hz_@125', '../MEG_data/aud_hofd_a_allRuns_tsss_audiobookPrepro_stPad1_lp50_resamp125_frac10ICAed_hp0.010000.set')
(megFileTag3, megFile3) = ('C_TSSS_0.01-50Hz_@125', '../MEG_data/aud_hofd_c_allRuns_tsss_audiobookPrepro_stPad1_lp50_resamp125_frac10ICAed_hp0.010000.set')
## put your on properties in here, as a .tab file of similar format (tab delimited, and field names in a first comment line - should be easy to do in excel...)
#to get the V5.tab:
# python ../scripts/buildtab.py hod_JoeTimes_LoadsaFeaturesV3.tab hod.wsj02to21-comparativized-gcg15-1671-4sm.fullberk.parsed.gcgbadwords > hod_JoeTimes_LoadsaFeaturesV4.tab
# python ../scripts/addsentid.py hod_JoeTimes_LoadsaFeaturesV4.tab > hod_JoeTimes_LoadsaFeaturesV5.tab
tokenPropsFile = '../MEG_data/hod_JoeTimes_LoadsaFeaturesV5.tab'
# WHICH CHANNELS TO LOOK AT AS ERFS
#*# MARTY #*# decide which channels to use - channels of interest are the first few you can look at in an ERF, and then from them you can choose one at a time with "channelToAnalyse" for the actual regression analysis #*#
#channelLabels = ['MEG0111', 'MEG0121', 'MEG0131', 'MEG0211', 'MEG0212', 'MEG0213', 'MEG0341']
#?# this way of doing things was a slightly clumsy work-around, cos I didn't have enough memory to epoch all 306 channels at one time
# LOAD WORD PROPS
#*# MARTY #*# change dtype to suit the files in your .tab file #*#
tokenProps = scipy.genfromtxt(tokenPropsFile,
delimiter='\t',names=True,
dtype="i4,f4,f4,S50,S50,i2,i2,i2,S10,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4,i1,>i4")
# ... and temporarily save as cpickle archive to satisfy the way I programmed the convenience function loadBookMEGWithAudio (it expects to find the same info in a C-pickle file, and so doesn't need to know about number and type of fields)
tokenPropsPickle = tokenPropsFile+'.cpk'
pickle.dump(tokenProps, open(tokenPropsPickle, 'wb'))
# <markdowncell>
# Trial Params
# ------------
# <codecell>
triggersOfInterest=['s%d' % i for i in range(1,10)]
refEvent = 'onTime' #,'offTime']
#*# MARTY #*# guess an epoch of -0.5 to +1s should be enough #*#
epochStart = -1; # stimulus ref event
epochEnd = +2; #
epochLength = epochEnd-epochStart;
baseline = False #[-1,0]
# <markdowncell>
# Epoch Data
# ----------
# <codecell>
# Get the goods on subject 1
(contSignalData1, metaData1, trackTrials, tokenPropsOrig, audioSignal, samplingRate, numChannels) = loadBookMEGWithAudio(megFile1, tokenPropsPickle, triggersOfInterest, epochEnd, epochStart, icaComps=False)
# Get the goods on subject 2
(contSignalData2, metaData2, trackTrials, tokenPropsOrig, audioSignal, samplingRate, numChannels) = loadBookMEGWithAudio(megFile2, tokenPropsPickle, triggersOfInterest, epochEnd, epochStart, icaComps=False)
# Get the goods on subject 3
(contSignalData3, metaData3, trackTrials, tokenPropsOrig, audioSignal, samplingRate, numChannels) = loadBookMEGWithAudio(megFile3, tokenPropsPickle, triggersOfInterest, epochEnd, epochStart, icaComps=False)
tokenProps = numpy.concatenate((tokenPropsOrig,tokenPropsOrig,tokenPropsOrig),axis=0)
#channelsOfInterest = [i for i in range(len(metaData1.chanlocs)) if metaData1.chanlocs[i].labels in channelLabels]
# REDUCE TRIALS TO JUST THOSE THAT CONTAIN A REAL WORD (NOT PUNCTUATION, SPACES, ...)
wordTrialsBool = numpy.array([p != '' for p in tokenProps['stanfPOS']])
#print(wordTrialsBool[:10])
# REDUCE TRIALS TO JUST THOSE THAT HAVE A DECENT DEPTH ESTIMATE
parsedTrialsBool = numpy.array([d != -1 for d in tokenProps['syndepth']])
#print(parsedTrialsBool[:10])
# <codecell>
# Set up the dev and test sets
devsizerecip = 3 # the reciprocal of the dev size, so devsizerecip = 3 means the dev set is 1/3 and the test set is 2/3
devitems = numpy.arange(1,max(tokenProps['sentid']),devsizerecip)
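# e.g. with devsizerecip = 3, devitems holds sentence ids 1, 4, 7, ...; those
# sentences form the dev set and the remaining sentence ids form the test set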
devTrialsBool = numpy.array([s in devitems for s in tokenProps['sentid']])
testTrialsBool = numpy.array([s not in devitems for s in tokenProps['sentid']])
inDataset = devTrialsBool
freqsource = None # determines how the frequency bands are defined #Can be 'weiss', 'wiki', or an interpolation of the two
avefitresults = {}
maxfitresults = {}
for channelix in range(metaData1.chanlocs.shape[0]-1): #minus 1 because the last 'channel' is MISC
print 'Compiling data from channel:',channelix
#need to reshape because severalMagChannels needs to be channel x samples, and 1-D shapes are flattened by numpy
severalMagChannels1 = contSignalData1[channelix,:].reshape((1,-1))
######
#####
# sys.stderr.write('metaData1.shape: '+str(metaData1.chanlocs.shape[0])+'\n')
# sys.stderr.write('contSignalData1.shape: '+str(contSignalData1.shape)+'\n')
# sys.stderr.write('severalMagChannels1.shape: '+str(severalMagChannels1.shape)+'\n')
# sys.stderr.write(str([metaData1.chanlocs[i].labels for i in range(len(metaData1.chanlocs))])+'\n')
# severalMagChannels1 = contSignalData1[channelix,:].reshape((1,-1))
# sys.stderr.write('contSignalData1.shape: '+str(contSignalData1.shape)+'\n')
# sys.stderr.write('severalMagChannels1.shape: '+str(severalMagChannels1.shape)+'\n')
# channelLabels = ['MEG0111', 'MEG0121', 'MEG0131', 'MEG0211', 'MEG0212', 'MEG0213', 'MEG0341']
# channelsOfInterest = [i for i in range(len(metaData1.chanlocs)) if metaData1.chanlocs[i].labels in channelLabels]
# severalMagChannels2 = contSignalData1[channelsOfInterest,:]
# sys.stderr.write('severalMagChannels2.shape: '+str(severalMagChannels2.shape)+'\n')
# severalMagChannels2 = contSignalData1[channelsOfInterest,:].reshape((1,-1))
# sys.stderr.write('severalMagChannels2.shape: '+str(severalMagChannels2.shape)+'\n')
# raise #need to determine whether we need to reshape with a center column or retain two dimensional...?
######
######
(wordTrials1, epochedSignalData1, epochSliceTimepoints, wordTimesAbsolute, numTrials, epochNumTimepoints) = wordTrialEpochify(severalMagChannels1, samplingRate, tokenPropsOrig, trackTrials, refEvent, epochEnd, epochStart)
# sys.stderr.write('epochedSignalData1.shape: '+str(epochedSignalData1.shape)+'\n')
# <codecell>
#del contSignalData1
#del severalMagChannels1
# <codecell>
severalMagChannels2 = contSignalData2[channelix,:].reshape((1,-1))
(wordTrials2, epochedSignalData2, epochSliceTimepoints, wordTimesAbsolute, numTrials, epochNumTimepoints) = wordTrialEpochify(severalMagChannels2, samplingRate, tokenPropsOrig, trackTrials, refEvent, epochEnd, epochStart)
# <codecell>
#del contSignalData2
#del severalMagChannels2
# <codecell>
severalMagChannels3 = contSignalData3[channelix,:].reshape((1,-1))
(wordTrials3, epochedSignalData3, epochSliceTimepoints, wordTimesAbsolute, numTrials, epochNumTimepoints) = wordTrialEpochify(severalMagChannels3, samplingRate, tokenPropsOrig, trackTrials, refEvent, epochEnd, epochStart)
# <codecell>
#del contSignalData3
#del severalMagChannels3
# <codecell>
epochedSignalData = numpy.concatenate((epochedSignalData1,epochedSignalData2,epochedSignalData3), axis=0)
# print(epochedSignalData.shape)
#print(tokenProps.shape)
# raise
# <codecell>
#print tokenProps.shape,epochedSignalData.shape
wordEpochs = epochedSignalData[wordTrialsBool & parsedTrialsBool & inDataset] #NB: Might not be
wordFeatures = tokenProps[wordTrialsBool & parsedTrialsBool & inDataset]
# print wordFeatures.shape, wordEpochs.shape
# raise
# <markdowncell>
# Spectral decomposition
# ---------------------
# <codecell>
#test reshape outcomes
#a = np.arange(18).reshape((3,2,3))
#print(a) #target: 3 epochs, 2 channels, 3 frequency_features
#b = np.arange(18).reshape((3,6))
#print(b) #fft output: 3 epochs, 2 channels x 3 frequency_features
#c = b.reshape((3,2,3))
#print(c) #reshaped output: 3 epochs, 2 channels, 3 frequency_features
# <codecell>
# The FFT script collapses across channels
# index0: epoch
# index1: channels x fft_feature_types x frequencies
# solution: reshape the output as (epochs,channels,-1)
#print 'wordEpochs: ',wordEpochs.shape
# Spectrally decompose the epochs
(mappedTrialFeatures, spectralFrequencies) = specfft.mapFeatures(wordEpochs,samplingRate,windowShape='hann',featureType='amp',freqRes=32)
# Reshape output to get epochs x channels x frequency
mappedTrialFeatures = mappedTrialFeatures.reshape((wordEpochs.shape[0],wordEpochs.shape[1],-1))
# print 'FFT output: ', mappedTrialFeatures.shape, spectralFrequencies.shape
# raise
# <codecell>
#print(spectralFrequencies)
freqbands = {}
if freqsource == 'weiss':
#Weiss et al. 05
freqbands['theta'] = numpy.nonzero( (spectralFrequencies >= 4) & (spectralFrequencies <= 7) )
freqbands['beta1'] = numpy.nonzero( (spectralFrequencies >= 13) & (spectralFrequencies <= 18) )
freqbands['beta2'] = numpy.nonzero( (spectralFrequencies >= 20) & (spectralFrequencies <= 28) )
freqbands['gamma'] = numpy.nonzero( (spectralFrequencies >= 30) & (spectralFrequencies <= 34) )
elif freqsource == 'wiki':
# end of http://en.wikipedia.org/wiki/Theta_rhythm
freqbands['delta'] = numpy.nonzero( (spectralFrequencies >= 0.1) & (spectralFrequencies <= 3) )
freqbands['theta'] = numpy.nonzero( (spectralFrequencies >= 4) & (spectralFrequencies <= 7) )
freqbands['alpha'] = numpy.nonzero( (spectralFrequencies >= 8) & (spectralFrequencies <= 15) )
freqbands['beta'] = numpy.nonzero( (spectralFrequencies >= 16) & (spectralFrequencies <= 31) )
freqbands['gamma'] = numpy.nonzero( (spectralFrequencies >= 32) & (spectralFrequencies <= 100) )
else:
#Interpolate between weiss and wiki
#print(numpy.nonzero((spectralFrequencies >= 4) & (spectralFrequencies <= 7)))
freqbands['theta'] = numpy.nonzero( (spectralFrequencies >= 4) & (spectralFrequencies <= 7) )
freqbands['alpha'] = numpy.nonzero( (spectralFrequencies >= 8) & (spectralFrequencies < 13) )
freqbands['beta1'] = numpy.nonzero( (spectralFrequencies >= 13) & (spectralFrequencies <= 18) )
freqbands['beta2'] = numpy.nonzero( (spectralFrequencies >= 20) & (spectralFrequencies <= 28) )
freqbands['gamma'] = numpy.nonzero( (spectralFrequencies >= 30) & (spectralFrequencies <= 34) )
#print(theta)
#print(theta[0])
# <markdowncell>
# Select channel for analysis
# --------------
# <codecell>
#print(channelLabels)
#channelToAnalyse = 3 # index of the channels above to actually run regression analysis on
#print 'Analyzing:', channelLabels[channelToAnalyse]
# <markdowncell>
# Run Regression Analysis
# ---------------------------
# <codecell>
# REGULARISATION VALUES TO TRY (e.g. in Ridge GCV)
regParam = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 1e+2, 2e+2, 5e+2, 1e+3, 2e+3, 5e+3]
# SELECT AND DESCRIBE THE REGRESSORS WE'RE CHOOSING TO USE
# this strings should match the names of the fields in tokenProps
#*# MARTY #*# here you should list the features you're choosing from your .tab file (just one?) #*#
features = [
#'logFreq_ANC',
#'surprisal2back_COCA',
#'bigramEntropy_COCA_here',
'syndepth'
]
#*# MARTY #*# ... this has shorthand versions of the variable names, for display, and also has to include the "position" one that this version of the script inserts by default #*#
labelMap = {
#'logFreq_ANC': 'freq',
#'surprisal2back_COCA': 'surprisal',
#'bigramEntropy_COCA_here': 'entropy',
#'sentenceSerial': 'position',
'syndepth': 'depth'
}
legendLabels = features
# <codecell>
# SLOT REGRESSORS IN ONE BY ONE
explanatoryFeatures = numpy.zeros((wordFeatures.shape)) # dummy
#explanatoryFeatures = numpy.array([])
for feature in features:
# print feature
explanatoryFeatures = numpy.vstack((explanatoryFeatures, wordFeatures[feature]))
explanatoryFeatures = explanatoryFeatures[1:].T # strip zeros out again
# PLOT EFFECTS X EPOCHS BACK
#*# MARTY #*# I guess you don't want to do the history thing (though is good initially for sanity check), so can leave this at 0 #*#
epochHistory = 0
# <codecell>
#tmpFeatures = explanatoryFeatures.copy()
#tmpLegend = legendLabels[:]
#for epochsBack in range(1,epochHistory+1):
# epochFeatures = numpy.zeros(tmpFeatures.shape)
# epochFeatures[epochsBack:,:] = tmpFeatures[:-epochsBack,:]
# explanatoryFeatures = numpy.hstack((explanatoryFeatures,epochFeatures))
# legendLabels = legendLabels + [l+'-'+str(epochsBack) for l in tmpLegend]
## put in sentence serial - can't leave in history, cos is too highly correlated across history...
#explanatoryFeatures = numpy.vstack((explanatoryFeatures.T, wordFeatures['sentenceSerial'])).T
#features.append('sentenceSerial')
#legendLabels.append('sentenceSerial')
# <codecell>
# STEP THROUGH EACH TIME POINT IN THE EPOCH, RUNNING REGRESSION FOR EACH ONE
bandavefits = {}
bandmaxfits = {}
for band in freqbands:
modelTrainingFit = []
modelTestCorrelation = []
modelParameters = []
legendLabels = features
for freq in freqbands[band]:
# WHICH VARIETY OF REGRESSION TO USE?
#*# MARTY #*# I get pretty similar results with all three of those below. The most generic (ie fewest extra assumptions) is normal LinearRegression. I guess RidgeCV should do best in terms of R^2, but has discontinuities in betas, as different regularisation parameters are optimal at each time step. LassoLars is something of a compromise. #*#
#lm = sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=True)
#lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=regParam) #, 10000, 100000])
lm = sklearn.linear_model.LassoLars(alpha=0.0001) #(alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.2204460492503131e-16, copy_X=True)
# NORMALISE THE EXPLANATORY VARIABLES? (for comparable beta magnitude interpretation)
#*# MARTY #*# choose whether to scale inputs #*#
trainX = mynormalise(explanatoryFeatures)
trainY = mynormalise(mappedTrialFeatures[:,0,freq])
#trainX = mynormalise(explanatoryFeatures)
#trainY = mynormalise(wordEpochs[:,channelToAnalyse,t])
#trainX = explanatoryFeatures
#trainY = wordEpochs[:,channelToAnalyse,t]
trainedLM = lm.fit(trainX,trainY)
modelParameters.append(lm)
#print(lm.score(trainX,trainY),trainX.shape[1], trainX.shape[0])
#modelTrainingFit.append(adjustR2(lm.score(trainX,trainY), trainX.shape[1], trainX.shape[0]))
modelTrainingFit.append(lm.score(trainX,trainY)) #for a single feature, no punishment is necessary
bandavefits[band] = numpy.mean(modelTrainingFit)
bandmaxfits[band] = numpy.max(modelTrainingFit)
avefitresults[ metaData1.chanlocs[channelix].labels ] = bandavefits
maxfitresults[ metaData1.chanlocs[channelix].labels ] = bandmaxfits
#print(modelTrainingFit)
#print(numpy.sort(modelTrainingFit)[::-1])
#print 'ave fit: ', numpy.mean(modelTrainingFit)
#print 'max fit: ', numpy.max(modelTrainingFit)
fitresults = {'ave':avefitresults,'max':maxfitresults}
pickle.dump(fitresults, open('fitresults.cpk', 'wb'))
# <markdowncell>
# Graph results
# ============
# <codecell>
# DETERMINE IF THERE IS CORRELATION BETWEEN THE EXPLANATORY VARIABLES
#betaMatrix = numpy.array([p.coef_ for p in modelParameters])
#print(betaMatrix.shape)
#neatLabels = [l.replace(re.match(r'[^-]+',l).group(0), labelMap[re.match(r'[^-]+',l).group(0)]) for l in legendLabels if re.match(r'[^-]+',l).group(0) in labelMap]
#legendLabels = numpy.array(legendLabels)
##numFeaturesDisplay = len(legendLabels)
#neatLabels = numpy.array(neatLabels)
#
## <codecell>
#
## DO BIG SUMMARY PLOT OF FEATURE CORRELATIONS, R^2 OVER TIMECOURSE, BETAS OVER TIME COURSE, AND ERF/ERP
#f = pylab.figure(figsize=(10,10))
#s = pylab.subplot(2,2,1)
#pylab.title('R-squared '+str(trainedLM))
#pylab.plot(modelTrainingFit, linewidth=2)
#commonPlotProps()
#s = pylab.subplot(2,2,2)
#if betaMatrix.shape[1] > 7:
# pylab.plot(betaMatrix[:,:7], '-', linewidth=2)
# pylab.plot(betaMatrix[:,7:], '--', linewidth=2)
#else:
# pylab.plot(betaMatrix, '-', linewidth=2)
#
#pylab.legend(neatLabels)
##pylab.legend(legendLabels)
#pylab.title('betas for all (normed) variables')
#commonPlotProps()
#
#
#s = pylab.subplot(3,3,2)
#pylab.title('correlations between explanatory variables')
#pylab.imshow(numpy.abs(numpy.corrcoef(explanatoryFeatures.T)),interpolation='nearest', origin='upper') # leave out the dummy one
#pylab.clim(0,1)
#pylab.yticks(range(len(neatLabels)),neatLabels)
#pylab.ylim((-0.5,len(neatLabels)-0.5))
#pylab.xticks(range(len(neatLabels)),neatLabels, rotation=90)
#pylab.xlim((-0.5,len(neatLabels)-0.5))
#pylab.colorbar()
#
##fontP = FontProperties()
##fontP.set_size('small')
##legend([s], "title", prop = fontP)
#
##s = pylab.subplot(2,2,4)
##pylab.plot(numpy.mean(epochedSignalData[wordTrialsBool,channelToAnalyse],axis=0).T, linewidth=2)
##pylab.title('ERF')
##commonPlotProps()
#
##print 'history %d, mean model fit over -0.5s to +1.0s: %.5f, max is %.5f' % (epochHistory, numpy.mean(modelTrainingFit[62:250]), numpy.max(modelTrainingFit[62:250]))
##pylab.savefig('meg_testfig_%s.png' % (channelLabels[channelToAnalyse]))
#
#pylab.show()
# <codecell>
|
gpl-2.0
|
shangwuhencc/shogun
|
examples/undocumented/python_modular/graphical/so_multiclass_director_BMRM.py
|
16
|
4362
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RealFeatures
from modshogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM, DirectorStructuredModel
from modshogun import BMRM, PPBMRM, P3BMRM, ResultSet, RealVector
from modshogun import StructuredAccuracy
class MulticlassStructuredModel(DirectorStructuredModel):
def __init__(self,features,labels):
DirectorStructuredModel.__init__(self)
self.set_features(features)
self.set_labels(labels)
self.dim = features.get_dim_feature_space()*labels.get_num_classes()
self.n_classes = labels.get_num_classes()
self.n_feats = features.get_dim_feature_space()
#self.use_director_risk()
def get_dim(self):
return self.dim
def argmax(self,w,feat_idx,training):
feature_vector = self.get_features().get_feature_vector(feat_idx)
label = None
if training == True:
label = int(RealNumber.obtain_from_generic(self.get_labels().get_label(feat_idx)).value)
ypred = 0
max_score = -1e10
for c in xrange(self.n_classes):
score = 0.0
for i in xrange(self.n_feats):
score += w[i+self.n_feats*c]*feature_vector[i]
if training == True:
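                # loss-augmented decoding: during training the 0/1 loss is added
                # to the score of every wrong class (margin rescaling)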
score += (c!=label)
if score > max_score:
max_score = score
ypred = c
res = ResultSet()
res.score = max_score
res.psi_pred = RealVector(self.dim)
res.psi_pred.zero()
for i in xrange(self.n_feats):
res.psi_pred[i+self.n_feats*ypred] = feature_vector[i]
res.argmax = RealNumber(ypred)
if training == True:
res.delta = (label!=ypred)
res.psi_truth = RealVector(self.dim)
res.psi_truth.zero()
for i in xrange(self.n_feats):
res.psi_truth[i+self.n_feats*label] = feature_vector[i]
for i in xrange(self.n_feats):
res.score -= w[i+self.n_feats*label]*feature_vector[i]
return res
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 10
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 50
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassStructuredModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = res.get_hist_Fp_vector()
Fds = res.get_hist_Fd_vector()
wdists = res.get_hist_wdist_vector()
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z, shading='interp')
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
|
gpl-3.0
|
hainm/scikit-learn
|
examples/ensemble/plot_bias_variance.py
|
357
|
7324
|
"""
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error, which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
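    # i.e. E_LS[(y - y_hat(x))^2] ~= noise(x) + bias^2(x) + variance(x), with the
    # expectation estimated by averaging over the n_repeat training sets drawn above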
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
|
bsd-3-clause
|
wkfwkf/statsmodels
|
examples/python/tsa_filters.py
|
34
|
4559
|
## Time Series Filters
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
dta = sm.datasets.macrodata.load_pandas().data
index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
print(index)
dta.index = index
del dta['year']
del dta['quarter']
print(sm.datasets.macrodata.NOTE)
print(dta.head(10))
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
dta.realgdp.plot(ax=ax);
legend = ax.legend(loc = 'upper left');
legend.prop.set_size(20);
#### Hodrick-Prescott Filter
# The Hodrick-Prescott filter separates a time-series $y_t$ into a trend $\tau_t$ and a cyclical component $\zeta_t$
#
# $$y_t = \tau_t + \zeta_t$$
#
# The components are determined by minimizing the following quadratic loss function
#
# $$\min_{\{\tau_{t}\}}\sum_{t=1}^{T}\zeta_{t}^{2}+\lambda\sum_{t=1}^{T}\left[\left(\tau_{t}-\tau_{t-1}\right)-\left(\tau_{t-1}-\tau_{t-2}\right)\right]^{2}$$
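# A hedged aside: the smoothing parameter $\lambda$ does not appear in the call
# below because statsmodels' hpfilter defaults to lamb=1600, the conventional
# value for quarterly data. An equivalent call making that default explicit
# (kept as a comment since the tutorial performs the real call next) would be:
#
#     gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp, lamb=1600)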
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp)
gdp_decomp = dta[['realgdp']]
gdp_decomp["cycle"] = gdp_cycle
gdp_decomp["trend"] = gdp_trend
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax, fontsize=16);
legend = ax.get_legend()
legend.prop.set_size(20);
#### Baxter-King approximate band-pass filter: Inflation and Unemployment
##### Explore the hypothesis that inflation and unemployment are counter-cyclical.
# The Baxter-King filter is intended to deal explicitly with the periodicity of the business cycle. By applying their band-pass filter to a series, they produce a new series that does not contain fluctuations at frequencies higher or lower than those of the business cycle. Specifically, the BK filter takes the form of a symmetric moving average
#
# $$y_{t}^{*}=\sum_{k=-K}^{k=K}a_ky_{t-k}$$
#
# where $a_{-k}=a_k$ and $\sum_{k=-K}^{K}a_k=0$ to eliminate any trend in the series and render it stationary if the series is I(1) or I(2).
#
# For completeness, the filter weights are determined as follows
#
# $$a_{j} = B_{j}+\theta\text{ for }j=0,\pm1,\pm2,\dots,\pm K$$
#
# $$B_{0} = \frac{\left(\omega_{2}-\omega_{1}\right)}{\pi}$$
# $$B_{j} = \frac{1}{\pi j}\left(\sin\left(\omega_{2}j\right)-\sin\left(\omega_{1}j\right)\right)\text{ for }j=\pm1,\pm2,\dots,\pm K$$
#
# where $\theta$ is a normalizing constant such that the weights sum to zero.
#
# $$\theta=\frac{-\sum_{j=-K}^{K}B_{j}}{2K+1}$$
#
# $$\omega_{1}=\frac{2\pi}{P_{H}}$$
#
# $$\omega_{2}=\frac{2\pi}{P_{L}}$$
#
# $P_L$ and $P_H$ are the periodicity of the low and high cut-off frequencies. Following Burns and Mitchell's work on US business cycles which suggests cycles last from 1.5 to 8 years, we use $P_L=6$ and $P_H=32$ by default.
bk_cycles = sm.tsa.filters.bkfilter(dta[["infl","unemp"]])
# * We lose K observations on both ends. It is suggested to use K=12 for quarterly data.
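# A hedged, more explicit version of the call above, spelling out the defaults
# (low=6, high=32 from Burns and Mitchell) together with K=12 for quarterly
# data (kept as a comment since the tutorial already performs the call):
#
#     bk_cycles = sm.tsa.filters.bkfilter(dta[["infl", "unemp"]], low=6, high=32, K=12)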
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
bk_cycles.plot(ax=ax, style=['r--', 'b-']);
#### Christiano-Fitzgerald approximate band-pass filter: Inflation and Unemployment
# The Christiano-Fitzgerald filter is a generalization of BK and can thus also be seen as a weighted moving average. However, the CF filter is asymmetric about $t$ and uses the entire series. The implementation of their filter involves the
# calculation of the weights in
#
# $$y_{t}^{*}=B_{0}y_{t}+B_{1}y_{t+1}+\dots+B_{T-1-t}y_{T-1}+\tilde B_{T-t}y_{T}+B_{1}y_{t-1}+\dots+B_{t-2}y_{2}+\tilde B_{t-1}y_{1}$$
#
# for $t=3,4,...,T-2$, where
#
# $$B_{j} = \frac{\sin(jb)-\sin(ja)}{\pi j},j\geq1$$
#
# $$B_{0} = \frac{b-a}{\pi},a=\frac{2\pi}{P_{U}},b=\frac{2\pi}{P_{L}}$$
#
# $\tilde B_{T-t}$ and $\tilde B_{t-1}$ are linear functions of the $B_{j}$'s, and the values for $t=1,2,T-1,$ and $T$ are also calculated in much the same way. $P_{U}$ and $P_{L}$ are as described above with the same interpretation.
# The CF filter is appropriate for series that may follow a random walk.
print(sm.tsa.stattools.adfuller(dta['unemp'])[:3])
print(sm.tsa.stattools.adfuller(dta['infl'])[:3])
cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl","unemp"]])
print(cf_cycles.head(10))
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
cf_cycles.plot(ax=ax, style=['r--','b-']);
# Filtering assumes *a priori* that business cycles exist. Because of this assumption, many macroeconomic modelers instead aim to match the shape of impulse response functions rather than replicate the properties of filtered series. See the VAR notebook.
|
bsd-3-clause
|
pv/scikit-learn
|
sklearn/tests/test_isotonic.py
|
230
|
11087
|
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
|
bsd-3-clause
|
jaeilepp/eggie
|
mne/inverse_sparse/mxne_optim.py
|
3
|
21727
|
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import warnings
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared
from ..time_frequency.stft import stft_norm2, stft, istft
def groups_norm2(A, n_orient):
"""compute squared L2 norms of groups inplace"""
n_positions = A.shape[0] // n_orient
return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
def norm_l2inf(A, n_orient, copy=True):
"""L2-inf norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return sqrt(np.max(groups_norm2(A, n_orient)))
def norm_l21(A, n_orient, copy=True):
"""L21 norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return np.sum(np.sqrt(groups_norm2(A, n_orient)))
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
"""proximity operator for l21 norm
L2 over columns and L1 over rows => groups contain n_orient rows.
    It can optionally take the negative frequencies into account
    when complex values are passed and is_stft=True.
Example
-------
>>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 0. 4. 3. 0. 0.]
[ 0. 4. 3. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l21(Y, 2, 2)
>>> print(Yp)
[[ 0. 2.86862915 2.15147186 0. 0. ]
[ 0. 2.86862915 2.15147186 0. 0. ]]
>>> print(active_set)
[ True True False False]
"""
if len(Y) == 0:
return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
if shape is not None:
shape_init = Y.shape
Y = Y.reshape(*shape)
n_positions = Y.shape[0] // n_orient
if is_stft:
rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
else:
rows_norm = np.sqrt(np.sum((np.abs(Y) ** 2).reshape(n_positions, -1),
axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
active_set = shrink > 0.0
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if shape is None:
Y *= shrink[active_set][:, np.newaxis]
else:
Y *= shrink[active_set][:, np.newaxis, np.newaxis]
Y = Y.reshape(-1, *shape_init[1:])
return Y, active_set
def prox_l1(Y, alpha, n_orient):
"""proximity operator for l1 norm with multiple orientation support
L2 over orientation and L1 over position (space + time)
Example
-------
>>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 1. 2. 3. 2. 0.]
[ 1. 2. 3. 2. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l1(Y, 2, 2)
>>> print(Yp)
[[ 0. 0.58578644 1.58578644 0.58578644 0. ]
[ 0. 0.58578644 1.58578644 0.58578644 0. ]]
>>> print(active_set)
[ True True False False]
"""
n_positions = Y.shape[0] // n_orient
norms = np.sqrt(np.sum((np.abs(Y) ** 2).T.reshape(-1, n_orient), axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
shrink = shrink.reshape(-1, n_positions).T
active_set = np.any(shrink > 0.0, axis=1)
shrink = shrink[active_set]
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if len(Y) > 0:
for o in range(n_orient):
Y[o::n_orient] *= shrink
return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
"""Duality gaps for the mixed norm inverse problem
For details see:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array of shape [n_sensors, n_times]
data
G : array of shape [n_sensors, n_active]
Gain matrix a.k.a. lead field
X : array of shape [n_active, n_times]
Sources
active_set : array of bool
Mask of active sources
alpha : float
Regularization parameter
n_orient : int
        Number of dipoles per location (typically 1 or 3)
Returns
-------
gap : float
Dual gap
pobj : float
Primal cost
dobj : float
Dual cost. gap = pobj - dobj
R : array of shape [n_sensors, n_times]
Current residual of M - G * X
"""
GX = np.dot(G[:, active_set], X)
R = M - GX
penalty = norm_l21(X, n_orient, copy=True)
nR2 = sum_squared(R)
pobj = 0.5 * nR2 + alpha * penalty
dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
scaling = alpha / dual_norm
scaling = min(scaling, 1.0)
dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
gap = pobj - dobj
return gap, pobj, dobj, R
@verbose
def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
init=None, n_orient=1):
"""Solves L21 inverse problem with proximal iterations and FISTA"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
lipschitz_constant = 1.1 * linalg.norm(G, ord=2) ** 2
if n_sources < n_sensors:
gram = np.dot(G.T, G)
GTM = np.dot(G.T, M)
else:
gram = None
if init is None:
X = 0.0
R = M.copy()
if gram is not None:
R = np.dot(G.T, R)
else:
X = init
if gram is None:
R = M - np.dot(G, X)
else:
R = GTM - np.dot(gram, X)
t = 1.0
Y = np.zeros((n_sources, n_times)) # FISTA aux variable
E = [] # track cost function
active_set = np.ones(n_sources, dtype=np.bool) # start with full AS
for i in range(maxit):
X0, active_set_0 = X, active_set # store previous values
if gram is None:
Y += np.dot(G.T, R) / lipschitz_constant # ISTA step
else:
Y += R / lipschitz_constant # ISTA step
X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * X
Y[active_set_0] -= dt * X0
Y_as = active_set_0 | active_set
if gram is None:
R = M - np.dot(G[:, Y_as], Y[Y_as])
else:
R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
E.append(pobj)
logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, maxit=10000, tol=1e-8,
verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with coordinate descent"""
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if init is not None:
init = init.T
clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
fit_intercept=False, max_iter=maxit,
warm_start=True)
clf.coef_ = init
clf.fit(G, M)
X = clf.coef_.T
active_set = np.any(X, axis=1)
X = X[active_set]
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
return X, active_set, pobj
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
active_set_size=50, debias=True, n_orient=1,
solver='auto'):
"""Solves L21 inverse solver with active set strategy
Algorithm is detailed in:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array
The data
G : array
The forward operator
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
maxit : int
The number of iterations
tol : float
Tolerance on dual gap for convergence checking
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates
n_orient : int
        The number of orientations (1: fixed, or 3: free or loose).
solver : 'prox' | 'cd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
n_dipoles = G.shape[1]
n_positions = n_dipoles // n_orient
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
logger.info("-- ALPHA MAX : %s" % alpha_max)
alpha = float(alpha)
has_sklearn = True
try:
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
except ImportError:
has_sklearn = False
if solver == 'auto':
if has_sklearn and (n_orient == 1):
solver = 'cd'
else:
solver = 'prox'
if solver == 'cd':
if n_orient == 1 and not has_sklearn:
warnings.warn("Scikit-learn >= 0.12 cannot be found. "
"Using proximal iterations instead of coordinate "
"descent.")
solver = 'prox'
if n_orient > 1:
warnings.warn("Coordinate descent is only available for fixed "
"orientation. Using proximal iterations instead of "
"coordinate descent")
solver = 'prox'
if solver == 'cd':
logger.info("Using coordinate descent")
l21_solver = _mixed_norm_solver_cd
else:
logger.info("Using proximal iterations")
l21_solver = _mixed_norm_solver_prox
if active_set_size is not None:
X_init = None
n_sensors, n_times = M.shape
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
active_set = np.zeros(n_positions, dtype=np.bool)
active_set[idx_large_corr[-active_set_size:]] = True
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
for k in range(maxit):
X, as_, E = l21_solver(M, G[:, active_set], alpha,
maxit=maxit, tol=tol, init=X_init,
n_orient=n_orient)
as_ = np.where(active_set)[0][as_]
gap, pobj, dobj, R = dgap_l21(M, G, X, as_, alpha, n_orient)
logger.info('gap = %s, pobj = %s' % (gap, pobj))
if gap < tol:
logger.info('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
else: # add sources
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :])
new_active_idx = new_active_idx.ravel()
idx_old_active_set = as_
active_set_old = active_set.copy()
active_set[new_active_idx] = True
as_size = np.sum(active_set)
logger.info('active set size %s' % as_size)
X_init = np.zeros((as_size, n_times), dtype=X.dtype)
idx_active_set = np.where(active_set)[0]
idx = np.searchsorted(idx_active_set, idx_old_active_set)
X_init[idx] = X
if np.all(active_set_old == active_set):
logger.info('Convergence stopped (AS did not change) !')
break
else:
logger.warning('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
active_set = np.zeros_like(active_set)
active_set[as_] = True
else:
X, active_set, E = l21_solver(M, G, alpha, maxit=maxit,
tol=tol, n_orient=n_orient)
if (active_set.sum() > 0) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
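# A minimal, hedged usage sketch for mixed_norm_solver (illustrative only --
# the shapes, the noiseless forward model and alpha=1.0 below are arbitrary
# assumptions, not values recommended by this module; kept as a comment so
# importing this module stays side-effect free):
#
#     rng = np.random.RandomState(0)
#     G = rng.randn(20, 60)             # 20 sensors, 60 candidate sources
#     X_true = np.zeros((60, 10))
#     X_true[:3] = rng.randn(3, 10)     # 3 active sources, 10 time samples
#     M = np.dot(G, X_true)
#     X_hat, active_set, E = mixed_norm_solver(M, G, alpha=1.0, n_orient=1)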
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
"""Compute lipschitz constant for FISTA
It uses a power iteration method.
"""
n_times = M.shape[1]
n_points = G.shape[1]
iv = np.ones((n_points, n_times), dtype=np.float)
v = phi(iv)
L = 1e100
for it in range(100):
L_old = L
logger.info('Lipschitz estimation: iteration = %d' % it)
iv = np.real(phiT(v))
Gv = np.dot(G, iv)
GtGv = np.dot(G.T, Gv)
w = phi(GtGv)
L = np.max(np.abs(w)) # l_inf norm
v = w / L
if abs((L - L_old) / L_old) < tol:
break
return L
def safe_max_abs(A, ia):
"""Compute np.max(np.abs(A[ia])) possible with empty A"""
if np.sum(ia): # ia is not empty
return np.max(np.abs(A[ia]))
else:
return 0.
def safe_max_abs_diff(A, ia, B, ib):
"""Compute np.max(np.abs(A)) possible with empty A"""
A = A[ia] if np.sum(ia) else 0.0
B = B[ib] if np.sum(ia) else 0.0
return np.max(np.abs(A - B))
class _Phi(object):
"""Util class to have phi stft as callable without using
a lambda that does not pickle"""
def __init__(self, wsize, tstep, n_coefs):
self.wsize = wsize
self.tstep = tstep
self.n_coefs = n_coefs
def __call__(self, x):
return stft(x, self.wsize, self.tstep,
verbose=False).reshape(-1, self.n_coefs)
class _PhiT(object):
"""Util class to have phi.T istft as callable without using
a lambda that does not pickle"""
def __init__(self, tstep, n_freq, n_step, n_times):
self.tstep = tstep
self.n_freq = n_freq
self.n_step = n_step
self.n_times = n_times
def __call__(self, z):
return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
self.n_times)
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
n_orient=1, maxit=200, tol=1e-8, log_objective=True,
lipschitz_constant=None, debias=True, verbose=None):
"""Solves TF L21+L1 inverse solver
Algorithm is detailed in:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
INFORMATION PROCESSING IN MEDICAL IMAGING
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
http://dx.doi.org/10.1007/978-3-642-22092-0_49
Parameters
----------
M : array
The data.
G : array
The forward operator.
alpha_space : float
The spatial regularization parameter. It should be between 0 and 100.
alpha_time : float
The temporal regularization parameter. The higher it is the smoother
will be the estimated time series.
wsize: int
length of the STFT window in samples (must be a multiple of 4).
tstep: int
step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1: fixed, or 3: free or loose).
maxit : int
The number of iterations.
tol : float
        If the absolute difference between estimates at two successive
        iterations is lower than tol, convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
lipschitz_constant : float | None
The lipschitz constant of the spatio temporal linear operator.
If None it is estimated.
debias : bool
Debias source estimates.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sensors, n_times = M.shape
n_dipoles = G.shape[1]
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
n_coefs = n_step * n_freq
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freq, n_step, n_times)
Z = np.zeros((0, n_coefs), dtype=np.complex)
active_set = np.zeros(n_dipoles, dtype=np.bool)
R = M.copy() # residual
if lipschitz_constant is None:
lipschitz_constant = 1.1 * tf_lipschitz_constant(M, G, phi, phiT)
logger.info("lipschitz_constant : %s" % lipschitz_constant)
t = 1.0
Y = np.zeros((n_dipoles, n_coefs), dtype=np.complex) # FISTA aux variable
Y[active_set] = Z
E = [] # track cost function
Y_time_as = None
Y_as = None
alpha_time_lc = alpha_time / lipschitz_constant
alpha_space_lc = alpha_space / lipschitz_constant
for i in range(maxit):
Z0, active_set_0 = Z, active_set # store previous values
if active_set.sum() < len(R) and Y_time_as is not None:
# trick when using tight frame to do a first screen based on
# L21 prox (L21 norms are not changed by phi)
GTR = np.dot(G.T, R) / lipschitz_constant
A = GTR.copy()
A[Y_as] += Y_time_as
_, active_set_l21 = prox_l21(A, alpha_space_lc, n_orient)
# just compute prox_l1 on rows that won't be zeroed by prox_l21
B = Y[active_set_l21] + phi(GTR[active_set_l21])
Z, active_set_l1 = prox_l1(B, alpha_time_lc, n_orient)
active_set_l21[active_set_l21] = active_set_l1
active_set_l1 = active_set_l21
else:
Y += np.dot(G.T, phi(R)) / lipschitz_constant # ISTA step
Z, active_set_l1 = prox_l1(Y, alpha_time_lc, n_orient)
Z, active_set_l21 = prox_l21(Z, alpha_space_lc, n_orient,
shape=(-1, n_freq, n_step), is_stft=True)
active_set = active_set_l1
active_set[active_set_l1] = active_set_l21
# Check convergence : max(abs(Z - Z0)) < tol
stop = (safe_max_abs(Z, ~active_set_0[active_set]) < tol and
safe_max_abs(Z0, ~active_set[active_set_0]) < tol and
safe_max_abs_diff(Z, active_set_0[active_set],
Z0, active_set[active_set_0]) < tol)
if stop:
print('Convergence reached !')
break
# FISTA 2 steps
# compute efficiently : Y = Z + ((t0 - 1.0) / t) * (Z - Z0)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * Z
if len(Z0):
Y[active_set_0] -= dt * Z0
Y_as = active_set_0 | active_set
Y_time_as = phiT(Y[Y_as])
R = M - np.dot(G[:, Y_as], Y_time_as)
if log_objective: # log cost function value
Z2 = np.abs(Z)
Z2 **= 2
X = phiT(Z)
RZ = M - np.dot(G[:, active_set], X)
pobj = 0.5 * linalg.norm(RZ, ord='fro') ** 2 \
+ alpha_space * norm_l21(X, n_orient) \
+ alpha_time * np.sqrt(np.sum(Z2.T.reshape(-1, n_orient),
axis=1)).sum()
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
pobj, np.sum(active_set)))
else:
logger.info("Iteration %d" % i + 1)
X = phiT(Z)
if (active_set.sum() > 0) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
|
bsd-2-clause
|
glouppe/scikit-learn
|
sklearn/tests/test_base.py
|
45
|
7049
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
bsd-3-clause
|
jjsalomon/python-analytics
|
pandas3 - Data Manipulation/pandas10 - Data Aggregation - GroupBy.py
|
1
|
3153
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 21:57:10 2017
@author: azkei
The last stage is Data Aggregation.
Introduction:
For Data Aggregation, we generally mean a transformation that produces a
single value from an array. In fact, you have already performed many
aggregation operations, for example when calculating sum(), mean() and count().
These functions operate on a set of data and perform a calculation whose
result is a single value.
A more formal approach, and the one that gives more control over data
aggregation, involves categorizing the data set.
Categorizing a data set in order to group it is often a critical stage in the
data analysis process. It is a process of transformation: after the division
into different groups, you apply a function that converts or transforms the
data in some way depending on the group it belongs to.
Very often the two phases of grouping and application of a function are
performed in a single step.
For this part of the data analysis, pandas provides a very flexible and
high-performance tool: GroupBy.
Its internal mechanism is usually described as a process called
Split-Apply-Combine, with three operations:
1. Splitting: division of the data set into groups
2. Applying: application of a function to each group
3. Combining: combination of all the results obtained from the different groups
"""
import pandas as pd
# 1. A practical example
# First generate data
frame = pd.DataFrame({'color':['white','red','green','red','green'],
'object':['pen','pencil','pencil','ashtray','pen'],
'price1':[5.56,4.20,1.30,0.56,2.75],
'price2':[4.75,4.12,1.60,0.75,3.15]})
frame
# Suppose we want to calculate the average price1 column using group labels
# listed in the column color.
group = frame['price1'].groupby(frame['color'])
# The result is a GroupBy object - no calculation has been performed yet; it
# has only collected the information needed for the calculation to be executed.
# This process is called grouping - all rows having the same value of color
# are grouped into a single item.
# To analyze this further...
group.groups
# Each group key maps to the row index labels that belong to that group.
# Now it is sufficient to apply an operation to the GroupBy object to obtain
# results for each group
# Mean
group.mean()
# Sum
group.sum()
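# A hedged aside: several aggregations can also be computed in one pass with
# agg() (standard pandas GroupBy API); the result has one column per function.
group.agg(['mean', 'sum', 'count'])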
# 2. Hierarchical Grouping
# We have seen how to group the data according to the values of a single
# column used as the key. The same idea can be extended to multiple columns,
# i.e. a hierarchical grouping with multiple keys.
ggroup = frame['price1'].groupby([frame['color'],frame['object']])
ggroup
ggroup.sum()
# So far we have applied the grouping to a single column of data, but in reality
# it can be extended to multiple columns or the entire DataFrame.
# We also do not need to reuse the GroupBy object several times; it is often
# convenient to combine the grouping and the calculation in a single pass,
# without any intermediate variable
frame[['price1','price2']].groupby(frame['color']).mean()
frame.groupby(frame['color']).mean()
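# A hedged illustration of the "Applying" step described in the introduction:
# a custom function evaluated group by group (here the price1 range per color).
frame.groupby('color')['price1'].apply(lambda s: s.max() - s.min())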
|
mit
|
hugobowne/scikit-learn
|
sklearn/neighbors/tests/test_kde.py
|
80
|
5560
|
import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
        # draw a sample with the current kernel
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
14
|
6123
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
    for var in (None, 'a', 'ar'):
        # pass the variant under test (the original loop variable was unused)
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
bsd-3-clause
|
GT-IDEaS/SkillsWorkshop2017
|
Week03/k.means.MariaSotoGiron.py
|
2
|
1305
|
#!/usr/bin/env python
#https://www.datascience.com/blog/introduction-to-k-means-clustering-algorithm-learn-data-science-tutorials
import numpy as np
from matplotlib import pyplot
#import numpy.core.multiarray
from sklearn.cluster import KMeans
import pandas as pd
### For the purposes of this example, we store feature data from our
### dataframe `df` in the `f1` and `f2` arrays. We combine these into
### a feature matrix `data` before passing it to the algorithm.
# Read input file .csv
df = pd.read_csv(
filepath_or_buffer='faithful.csv', sep=',')
#print (df)
f1 = df['eruptions'].values
f2 = df['waiting'].values
# Build the feature matrix; list() is needed because zip() returns an
# iterator on Python 3, and np.array avoids the deprecated np.matrix.
data = np.array(list(zip(f1, f2)))
k=2
kmeans = KMeans(n_clusters=k).fit(data)
#The cluster labels are returned in kmeans.labels_.
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
#plot clustering
for i in range(k):
# select only data observations with cluster label == i
ds = data[np.where(labels==i)]
# plot the data observations
pyplot.plot(ds[:,0],ds[:,1],'o')
# plot the centroids
lines = pyplot.plot(centroids[i,0],centroids[i,1],'kx')
# make the centroid x's bigger
pyplot.setp(lines,ms=15.0)
pyplot.setp(lines,mew=2.0)
pyplot.show()
#Step 4: Iterate Over Several Values of K
#kmeans = KMeans(n_clusters=4).fit(X)
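# A hedged sketch of Step 4 (an assumed approach, not part of the original
# tutorial): refit for several values of k and plot the within-cluster sum of
# squares (inertia_) to look for an "elbow".
inertias = []
k_values = range(1, 7)
for k in k_values:
    inertias.append(KMeans(n_clusters=k).fit(data).inertia_)
pyplot.plot(list(k_values), inertias, 'o-')
pyplot.xlabel('k')
pyplot.ylabel('within-cluster sum of squares')
pyplot.show()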
|
bsd-3-clause
|
petosegan/scikit-learn
|
sklearn/tests/test_common.py
|
127
|
7665
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup; this tests all the
    # 'configure' functions in the setup.py files in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
|
bsd-3-clause
|
jordancheah/zipline
|
tests/modelling/test_us_equity_pricing_loader.py
|
15
|
23196
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for zipline.data.ffc.loaders.us_equity_pricing
"""
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import (
arange,
datetime64,
uint32,
)
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
from pandas import (
concat,
DataFrame,
DatetimeIndex,
Timestamp,
)
from pandas.util.testing import assert_index_equal
from testfixtures import TempDirectory
from zipline.lib.adjustment import Float64Multiply
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.synthetic import (
NullAdjustmentReader,
SyntheticDailyBarWriter,
)
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.finance.trading import TradingEnvironment
from zipline.utils.test_utils import (
seconds_to_timestamp,
str_to_seconds,
)
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
TEST_QUERY_ASSETS = EQUITY_INFO.index
class BcolzDailyBarTestCase(TestCase):
def setUp(self):
all_trading_days = TradingEnvironment.instance().trading_days
self.trading_days = all_trading_days[
all_trading_days.get_loc(TEST_CALENDAR_START):
all_trading_days.get_loc(TEST_CALENDAR_STOP) + 1
]
self.asset_info = EQUITY_INFO
self.writer = SyntheticDailyBarWriter(
self.asset_info,
self.trading_days,
)
self.dir_ = TempDirectory()
self.dir_.create()
self.dest = self.dir_.getpath('daily_equity_pricing.bcolz')
def tearDown(self):
self.dir_.cleanup()
@property
def assets(self):
return self.asset_info.index
def trading_days_between(self, start, end):
return self.trading_days[self.trading_days.slice_indexer(start, end)]
def asset_start(self, asset_id):
return self.writer.asset_start(asset_id)
def asset_end(self, asset_id):
return self.writer.asset_end(asset_id)
def dates_for_asset(self, asset_id):
start, end = self.asset_start(asset_id), self.asset_end(asset_id)
return self.trading_days_between(start, end)
def test_write_ohlcv_content(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
for column in SyntheticDailyBarWriter.OHLCV:
idx = 0
data = result[column][:]
multiplier = 1 if column == 'volume' else 1000
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(
SyntheticDailyBarWriter.expected_value(
asset_id,
date,
column
) * multiplier,
data[idx],
)
idx += 1
self.assertEqual(idx, len(data))
def test_write_day_and_id(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
idx = 0
ids = result['id']
days = result['day']
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(ids[idx], asset_id)
self.assertEqual(date, seconds_to_timestamp(days[idx]))
idx += 1
def test_write_attrs(self):
result = self.writer.write(self.dest, self.trading_days, self.assets)
expected_first_row = {
'1': 0,
'2': 5, # Asset 1 has 5 trading days.
'3': 12, # Asset 2 has 7 trading days.
'4': 33, # Asset 3 has 21 trading days.
'5': 44, # Asset 4 has 11 trading days.
'6': 49, # Asset 5 has 5 trading days.
}
expected_last_row = {
'1': 4,
'2': 11,
'3': 32,
'4': 43,
'5': 48,
'6': 57, # Asset 6 has 9 trading days.
}
expected_calendar_offset = {
'1': 0, # Starts on 6-01, 1st trading day of month.
'2': 15, # Starts on 6-22, 16th trading day of month.
'3': 1, # Starts on 6-02, 2nd trading day of month.
'4': 0, # Starts on 6-01, 1st trading day of month.
'5': 9, # Starts on 6-12, 10th trading day of month.
'6': 10, # Starts on 6-15, 11th trading day of month.
}
self.assertEqual(result.attrs['first_row'], expected_first_row)
self.assertEqual(result.attrs['last_row'], expected_last_row)
self.assertEqual(
result.attrs['calendar_offset'],
expected_calendar_offset,
)
assert_index_equal(
self.trading_days,
DatetimeIndex(result.attrs['calendar'], tz='UTC'),
)
def _check_read_results(self, columns, assets, start_date, end_date):
table = self.writer.write(self.dest, self.trading_days, self.assets)
reader = BcolzDailyBarReader(table)
dates = self.trading_days_between(start_date, end_date)
results = reader.load_raw_arrays(columns, dates, assets)
for column, result in zip(columns, results):
assert_array_equal(
result,
self.writer.expected_values_2d(
dates,
assets,
column.name,
)
)
@parameterized.expand([
([USEquityPricing.open],),
([USEquityPricing.close, USEquityPricing.volume],),
([USEquityPricing.volume, USEquityPricing.high, USEquityPricing.low],),
(USEquityPricing.columns,),
])
def test_read(self, columns):
self._check_read_results(
columns,
self.assets,
TEST_QUERY_START,
TEST_QUERY_STOP,
)
def test_start_on_asset_start(self):
"""
Test loading with queries that start on the first day of each asset's
lifetime.
"""
columns = [USEquityPricing.high, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_start(asset),
end_date=self.trading_days[-1],
)
def test_start_on_asset_end(self):
"""
Test loading with queries that start on the last day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_end(asset),
end_date=self.trading_days[-1],
)
def test_end_on_asset_start(self):
"""
Test loading with queries that end on the first day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.trading_days[0],
end_date=self.asset_start(asset),
)
def test_end_on_asset_end(self):
"""
Test loading with queries that end on the last day of each asset's
lifetime.
"""
columns = [USEquityPricing.close, USEquityPricing.volume]
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.trading_days[0],
end_date=self.asset_end(asset),
)
# The ADJUSTMENTS frames below use the following scheme to encode information
# about each value so that it can be identified upon inspection (a small
# decoding helper follows the DIVIDENDS frame).
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
SPLITS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.103,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.110,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.112,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-21'),
'ratio': 6.121,
'sid': 6},
# Another action in query range, should have last_row of 1
{'effective_date': str_to_seconds('2015-06-11'),
'ratio': 3.111,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.119,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
MERGERS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.203,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.210,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.212,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 6.225,
'sid': 6},
# Another action in query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 4.212,
'sid': 4},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.219,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
DIVIDENDS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-01'),
'ratio': 1.301,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.310,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.312,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 6.325,
'sid': 6},
# Another action in query range, should have last_row of 3
{'effective_date': str_to_seconds('2015-06-15'),
'ratio': 3.315,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.319,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
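# Illustration only (not used by any test): a tiny helper that decodes the
# scheme described above from a ratio value, e.g. the SPLITS ratio 3.112
# encodes sid 3, action "split" (1) and the 12th of the month.
def _decode_adjustment_ratio(ratio):
    """Decode a test ratio into (sid, action, day_of_month)."""
    actions = {1: 'split', 2: 'merger', 3: 'dividend'}
    sid = int(ratio)
    action = actions[int(round(ratio * 10)) % 10]
    day_of_month = int(round(ratio * 1000)) % 100
    return sid, action, day_of_month
# Example: _decode_adjustment_ratio(3.112) == (3, 'split', 12)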
class USEquityPricingLoaderTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.test_data_dir = TempDirectory()
cls.db_path = cls.test_data_dir.getpath('adjustments.db')
writer = SQLiteAdjustmentWriter(cls.db_path)
writer.write(SPLITS, MERGERS, DIVIDENDS)
cls.assets = TEST_QUERY_ASSETS
all_days = TradingEnvironment.instance().trading_days
cls.calendar_days = all_days[
all_days.slice_indexer(TEST_CALENDAR_START, TEST_CALENDAR_STOP)
]
cls.asset_info = EQUITY_INFO
cls.bcolz_writer = SyntheticDailyBarWriter(
cls.asset_info,
cls.calendar_days,
)
cls.bcolz_path = cls.test_data_dir.getpath('equity_pricing.bcolz')
cls.bcolz_writer.write(cls.bcolz_path, cls.calendar_days, cls.assets)
@classmethod
def tearDownClass(cls):
cls.test_data_dir.cleanup()
def test_input_sanity(self):
# Ensure that the input data doesn't contain adjustments during periods
# where the corresponding asset didn't exist.
for table in SPLITS, MERGERS, DIVIDENDS:
for eff_date_secs, _, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s')
asset_start, asset_end = EQUITY_INFO.ix[
sid, ['start_date', 'end_date']
]
self.assertGreaterEqual(eff_date, asset_start)
self.assertLessEqual(eff_date, asset_end)
def calendar_days_between(self, start_date, end_date):
return self.calendar_days[
self.calendar_days.slice_indexer(start_date, end_date)
]
def expected_adjustments(self, start_date, end_date):
price_adjustments = {}
volume_adjustments = {}
query_days = self.calendar_days_between(start_date, end_date)
start_loc = query_days.get_loc(start_date)
for table in SPLITS, MERGERS, DIVIDENDS:
for eff_date_secs, ratio, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s', tz='UTC')
# The boundary conditions here are subtle. An adjustment with
# an effective date equal to the query start can't have an
# effect because adjustments only affect the array for dates
# strictly less than the adjustment effective date.  (A worked
# example follows this method.)
if not (start_date < eff_date <= end_date):
continue
eff_date_loc = query_days.get_loc(eff_date)
delta = eff_date_loc - start_loc
# Pricing adjustments should be applied on the date
# corresponding to the effective date of the input data. They
# should affect all rows **before** the effective date.
price_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta - 1,
col=sid - 1,
value=ratio,
)
)
# Volume is *inversely* affected by *splits only*.
if table is SPLITS:
volume_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta - 1,
col=sid - 1,
value=1.0 / ratio,
)
)
return price_adjustments, volume_adjustments
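# Worked example of the boundary logic documented in expected_adjustments
# above (comments only): with the query running 2015-06-10 .. 2015-06-19,
# the sid-3 split effective 2015-06-12 lands at delta == 2 (the third query
# day) and is therefore recorded as a Float64Multiply with first_row=0 and
# last_row=1, rescaling only the rows strictly before its effective date.
# The 2015-06-10 entries are skipped entirely, since an effective date equal
# to the query start has no earlier rows left to adjust.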
def test_load_adjustments_from_sqlite(self):
reader = SQLiteAdjustmentReader(self.db_path)
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
adjustments = reader.load_adjustments(
columns,
query_days,
self.assets,
)
close_adjustments = adjustments[0]
volume_adjustments = adjustments[1]
expected_close_adjustments, expected_volume_adjustments = \
self.expected_adjustments(TEST_QUERY_START, TEST_QUERY_STOP)
self.assertEqual(close_adjustments, expected_close_adjustments)
self.assertEqual(volume_adjustments, expected_volume_adjustments)
def test_read_no_adjustments(self):
adjustment_reader = NullAdjustmentReader()
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
adjustments = adjustment_reader.load_adjustments(
columns,
query_days,
self.assets,
)
self.assertEqual(adjustments, [{}, {}])
baseline_reader = BcolzDailyBarReader(self.bcolz_path)
pricing_loader = USEquityPricingLoader(
baseline_reader,
adjustment_reader,
)
closes, volumes = pricing_loader.load_adjusted_array(
columns,
DataFrame(True, index=query_days, columns=self.assets),
)
expected_baseline_closes = self.bcolz_writer.expected_values_2d(
query_days,
self.assets,
'close',
)
expected_baseline_volumes = self.bcolz_writer.expected_values_2d(
query_days,
self.assets,
'volume',
)
# AdjustedArrays should yield the same data as the expected baseline.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(closes.traverse(windowlen)):
assert_array_equal(
expected_baseline_closes[offset:offset + windowlen],
window,
)
for offset, window in enumerate(volumes.traverse(windowlen)):
assert_array_equal(
expected_baseline_volumes[offset:offset + windowlen],
window,
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
closes.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
def apply_adjustments(self, dates, assets, baseline_values, adjustments):
min_date, max_date = dates[[0, -1]]
values = baseline_values.copy()
for eff_date_secs, ratio, sid in adjustments.itertuples(index=False):
eff_date = seconds_to_timestamp(eff_date_secs)
if eff_date < min_date or eff_date > max_date:
continue
eff_date_loc = dates.get_loc(eff_date)
asset_col = assets.get_loc(sid)
# Apply ratio multiplicatively to the asset column on all rows
# **strictly less** than the adjustment effective date. Note that
# this will be a no-op in the case that the effective date is the
# first entry in dates.
values[:eff_date_loc, asset_col] *= ratio
return values
def test_read_with_adjustments(self):
columns = [USEquityPricing.high, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
baseline_reader = BcolzDailyBarReader(self.bcolz_path)
adjustment_reader = SQLiteAdjustmentReader(self.db_path)
pricing_loader = USEquityPricingLoader(
baseline_reader,
adjustment_reader,
)
closes, volumes = pricing_loader.load_adjusted_array(
columns,
DataFrame(True, index=query_days, columns=arange(1, 7)),
)
expected_baseline_highs = self.bcolz_writer.expected_values_2d(
query_days,
self.assets,
'high',
)
expected_baseline_volumes = self.bcolz_writer.expected_values_2d(
query_days,
self.assets,
'volume',
)
# At each point in time, the AdjustedArrays should yield the baseline
# with all adjustments up to that date applied.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(closes.traverse(windowlen)):
baseline = expected_baseline_highs[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
expected_adjusted_highs = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
# Apply all adjustments.
concat([SPLITS, MERGERS, DIVIDENDS], ignore_index=True),
)
assert_allclose(expected_adjusted_highs, window)
for offset, window in enumerate(volumes.traverse(windowlen)):
baseline = expected_baseline_volumes[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
# Apply only splits and invert the ratio.
adjustments = SPLITS.copy()
adjustments.ratio = 1 / adjustments.ratio
expected_adjusted_volumes = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
adjustments,
)
# FIXME: Make AdjustedArray properly support integral types.
assert_array_equal(
expected_adjusted_volumes,
window.astype(uint32),
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
closes.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
|
apache-2.0
|
spelteam/spel
|
src/python/plotDetSeqErrors.py
|
1
|
4407
|
#! /usr/bin/env python2.7
import glob
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import argparse
import numpy as np
import matplotlib.image as mpimg
from matplotlib.lines import Line2D
from pylab import figure, show
import math
import os
import re
def usage():
print("Author: Mykyta Fastovets / poselib project / 2015")
print("This utility is an analysis tool for plotting error files generated by the poselib tuners.")
print("Input should be a .err file.")
print("Example usage: ./plotSimVsTemp.py ~/file.err ")
def dist(a,b):
return math.sqrt((a[0]-b[0])**2+(a[1] - b[1])**2)
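# Layout of the .err file as inferred from the parsing below (an assumption,
# not a documented format): the first line begins with the number of tuner
# parameters N; every later line reads
#   frameID param_1 ... param_N err_1 stddev_1 err_2 stddev_2 ...
# i.e. per-body-part RMS errors alternate with their standard deviations
# after the parameter columns, and frameID 0 marks the start of a new
# parameter combination.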
parser = argparse.ArgumentParser(description='1 non-optional argument')
parser.add_argument('ERRIN', action="store")
parseResult = parser.parse_args()
#DATADIR contains that folder that contains all other data
errFile = parseResult.ERRIN
data = [line.strip().split() for line in open(errFile)] # read the data from the .err file
firstLine = data.pop(0) #pop the first line off the data stack
numParams = int(firstLine.pop(0)) #the number of parameters in the file
pfRMS=[]
frameMeans=[]
frameDevs=[]
paramNames=[]
means=[]
devs=[]
for dataItem in data: #data now contains everything but the first line
frameID = int(dataItem[0])
params = [float(x) for x in dataItem[1:numParams+1]]
if frameID<=20:
if frameID==0:
paramNames.append(params)
if len(means)!=0:
frameMeans.append(means)
frameDevs.append(devs)
means=[]
devs=[]
#rest of the items are bodypart errors
partErrors=dataItem[numParams+1:len(dataItem)]
for err, stddev in zip(partErrors[0::2], partErrors[1::2]): #for each number
means.append(float(err))
devs.append(float(stddev))
#mMean = np.mean(means)
#mDev = np.std(means)
#dMean = np.mean(devs)
#dDev = np.std(devs)
#we now have the mean and SD for both, error and SD of labels at a particular frame, to associate with parameters
pfRMS.append([params, means, devs]) #means and std devs pushed
fig = plt.figure()
ax = fig.add_subplot(111)
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ap = ax.boxplot(frameMeans, patch_artist=True)
ax.set_ylabel('RMS Error (pix)', fontsize=30)
plt.xticks(rotation=45)
## change outline color, fill color and linewidth of the boxes
for box in ap['boxes']:
# change outline color
box.set( color='#7570b3', linewidth=2)
# change fill color
box.set( facecolor = '#1b9e77' )
## change color and linewidth of the whiskers
for whisker in ap['whiskers']:
whisker.set(color='#009933', linewidth=2)
## change color and linewidth of the caps
for cap in ap['caps']:
cap.set(color='#009933', linewidth=2)
## change color and linewidth of the medians
for median in ap['medians']:
median.set(color='#b2df8a', linewidth=2)
## change the style of fliers and their fill
for flier in ap['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
# bx = fig.add_subplot(212)
# bx.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# bx.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
bp = ax.boxplot(frameDevs, patch_artist=True)
## change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#7570b3', linewidth=2)
# change fill color
box.set( facecolor = '#771b9e' )
## change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#7570b3', linewidth=2)
## change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
## change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#FF6600', linewidth=2)
## change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
## Remove top axes and right axes ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# bx.get_xaxis().tick_bottom()
# bx.get_yaxis().tick_left()
plt.xticks(rotation=45)
#plt.setp(paramNames, rotation=45, fontsize=8)
ax.set_xticklabels(paramNames)
# bx.set_xticklabels(paramNames)
print paramNames
# Save the figure
plt.show()
#fig.savefig('testFig.png', bbox_inches='tight')
|
gpl-3.0
|
3manuek/scikit-learn
|
sklearn/tests/test_qda.py
|
155
|
3481
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
|
bsd-3-clause
|
chengsoonong/crowdastro-projects
|
ATLAS-CDFS/scripts/plot_zid.py
|
1
|
6213
|
"""Plot a Zooniverse subject.
Matthew Alger <[email protected]>
Research School of Astronomy and Astrophysics
The Australian National University
2017
"""
import aplpy
import astropy.coordinates
import astropy.io.ascii
import astropy.io.fits
import matplotlib.pyplot as plt
import matplotlib.patches
import numpy
import matplotlib
INCHES_PER_PT = 1.0 / 72.27
COLUMN_WIDTH_PT = 240.0
FONT_SIZE_PT = 8.0
pgf_with_latex = {
"pgf.texsystem": "pdflatex",
"text.usetex": True,
"font.family": "serif",
"font.serif": [],
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": FONT_SIZE_PT,
"font.size": FONT_SIZE_PT,
"legend.fontsize": FONT_SIZE_PT,
"xtick.labelsize": FONT_SIZE_PT,
"ytick.labelsize": FONT_SIZE_PT,
"figure.figsize": (COLUMN_WIDTH_PT * INCHES_PER_PT, 0.8 * COLUMN_WIDTH_PT * INCHES_PER_PT),
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
]
}
matplotlib.rcParams.update(pgf_with_latex)
def plot(radio_path, ir_path, plot_atlas_hosts=False, vmax=99.5, centrebox=False,
centreboxwidth=None, width_in_px=True, stretch='arcsinh', fig=None,
first=False):
fig = aplpy.FITSFigure(ir_path, slices=[0, 1], figure=fig)
fig.set_theme('publication')
# if not v:
# fig.show_grayscale(stretch=stretch, invert=True)
# else:
# fig.show_grayscale(stretch=stretch, vmin=v[0], vmax=v[1], invert=True)
with astropy.io.fits.open(ir_path) as f:
fig.show_colorscale(cmap='cubehelix_r', vmin=f[0].data.min(), vmax=numpy.percentile(f[0].data, vmax))
if plot_atlas_hosts:
table = astropy.io.ascii.read(
'/Users/alger/data/RGZ/dr1_weighted_old/static_rgz_host_full.csv')
ras = table['SWIRE.ra']
decs = table['SWIRE.dec']
fig.show_markers(ras, decs, marker='x', s=50, c='red')
if centrebox:
with astropy.io.fits.open(radio_path) as f, astropy.io.fits.open(ir_path) as g:
if first:
contours = numpy.array([4, 8, 16, 32, 64, 128, 256]) * 0.14e-3
fig.show_contour(f, levels=contours, colors='black', linewidths=0.75, zorder=2, slices=[2, 3])
else:
contours = [4, 8, 16, 32, 64, 128, 256]
fig.show_contour(f, levels=contours, colors='black', linewidths=0.75, zorder=2)
centre = numpy.array(g[0].data.shape) // 2
if not first:
cdelt1 = f[0].header['CDELT1']
cdelt2 = f[0].header['CDELT2']
ra, dec = fig.pixel2world(centre[0], centre[1])
else:
cdelt1 = f[0].header['CDELT3']
cdelt2 = f[0].header['CDELT4']
ra, dec = fig.pixel2world(centre[0], centre[1])
if width_in_px:
width = -cdelt1 * centreboxwidth
height = cdelt2 * centreboxwidth
else:
width = centreboxwidth
height = centreboxwidth
fig.show_rectangles([ra], [dec], width, height, color='r', linewidth=1)
else:
with astropy.io.fits.open(radio_path) as f:
if first:
contours = numpy.array([4, 8, 16, 32, 64]) * 0.14e-3
fig.show_contour(f, levels=contours, colors='black', linewidths=0.75, zorder=2, slices=[2, 3])
else:
contours = [4, 8, 16, 32, 64, 128, 256]
fig.show_contour(f, levels=contours, colors='black', linewidths=0.75, zorder=2)
fig.axis_labels.set_xtext('Right Ascension (J2000)')
fig.axis_labels.set_ytext('Declination (J2000)')
# fig.ticks.set_linewidth(2)
# fig.ticks.set_color('black')
# fig.tick_labels.set_font(size='xx-large', weight='medium', \
# stretch='normal', family='sans-serif', \
# style='normal', variant='normal')
# fig.axis_labels.set_font(size='xx-large', weight='medium', \
# stretch='normal', family='sans-serif', \
# style='normal', variant='normal')
fig.set_tick_labels_format(xformat='hh:mm:ss',yformat='dd:mm:ss')
return fig
def plot_box_FIRST(fig, path):
fig.show_rectangles([])
# rect = matplotlib.patches.Rectangle((267 / 2 - 267 / 8 * 3 / 2, 267 / 2 - 267 / 8 * 3 / 2), 267 / 8 * 3, 267 / 8 * 3, facecolor='None', edgecolor='red', linewidth=2)
# plt.gca().add_patch(rect)
# def plot_box_ATLAS(fig, path):
# rect = matplotlib.patches.Rectangle((100 - 35, 100 - 35), 70, 70, facecolor='None', edgecolor='red', linewidth=2)
# plt.gca().add_patch(rect)
if __name__ == '__main__':
# radio_path = "/Users/alger/data/RGZ/cdfs/2x2/CI2363_radio.fits"
# ir_path = "/Users/alger/data/RGZ/cdfs/2x2/CI2363_ir.fits"
# fig = plt.figure()
# fig = plot(radio_path, ir_path, plot_atlas_hosts=False, centrebox=True, centreboxwidth=48 / 60 / 60, width_in_px=False, fig=fig)
# plt.subplots_adjust(top=1, right=0.95, left=0.3)
# plt.show()
# plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/CI2363_fig.pdf')
# radio_path = "/Users/alger/data/RGZ/cdfs/2x2/CI0077C1_radio.fits"
# ir_path = "/Users/alger/data/RGZ/cdfs/2x2/CI0077C1_ir.fits"
# fig = plt.figure()
# fig = plot(radio_path, ir_path, plot_atlas_hosts=True, centrebox=False, fig=fig, vmax=99.9)
# plt.subplots_adjust(top=1, right=0.95, left=0.3)
# plt.show()
# plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/CI0077C1_fig.pdf')
# radio_path = "/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/FIRSTJ151227.2+454026_8.fits"
# ir_path = "/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/2279p454_ac51-w1-int-3_ra228.11333333333332_dec45.67388888888889_asec480.000.fits"
# fig = plt.figure()
# fig = plot(radio_path, ir_path, plot_atlas_hosts=False, centrebox=True, centreboxwidth=3 / 60, width_in_px=False, stretch='linear', fig=fig, first=True)
# plt.subplots_adjust(top=1, right=0.95, left=0.3)
# plt.show()
# plt.savefig('/Users/alger/repos/crowdastro-projects/ATLAS-CDFS/images/FIRSTJ151227_fig.pdf')
|
mit
|
svobodam/Deep-Learning-Text-Summariser
|
Models/sequenceNet.py
|
1
|
5910
|
# Original script developed by Harshal Priyadarshi https://github.com/harpribot and from Tensorflow: https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb
# Edited for purpose of this project.
# Import required libraries
from tensorflow.python.framework import ops
import tensorflow as tf
import numpy as np
import pandas as pd
from abc import abstractmethod, ABCMeta
class NeuralNet(object):
__metaclass__ = ABCMeta
def __init__(self):
# Sequence-to-sequence Neural Network
# parameters
self.train_batch_size = None
self.test_batch_size = None
self.memory_dim = None
self.learning_rate = None
self.saver = None
self.sess = None
self.test_size = self.test_size
self.checkpointSys = self.checkpointSys
self.mapper_dict = self.mapper_dict
self.test_review = self.test_review
self.true_summary = self.true_summary
self.predicted_test_summary = self.predicted_test_summary
# Load all the parameters
self._load_model_params()
def set_parameters(self, train_batch_size, test_batch_size, memory_dim, learning_rate):
#Set the parameters for the model and training.
#parameter train_batch_size: The batch size of examples used for batch training
#parameter test_batch_size: The batch size of test examples used for testing
#parameter memory_dim: The length of the hidden vector produced by the encoder
#parameter learning_rate: The learning rate for Stochastic Gradient Descent
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.memory_dim = memory_dim
self.learning_rate = learning_rate
@abstractmethod
def _load_data(self):
pass
@abstractmethod
def _split_train_tst(self):
pass
def _load_model_params(self):
#Load model parameters
#self.seq_length -> The length of the input sequence (Length of input sentence fed to the encoder-decoder model)
#self.vocab_size -> The size of the data vocabulary
#self.momentum -> The momentum parameter in the update rule for SGD
#:return: None
# parameters
self.seq_length = self.mapper_dict['seq_length']
self.vocab_size = self.mapper_dict['vocab_size']
self.momentum = 0.9
def begin_session(self):
# Begin the tensorflow session.
ops.reset_default_graph()
# initialize the session object
self.sess = tf.Session()
def form_model_graph(self):
#Creates the data graph, loads the model and optimizer and then starts the session.
#:return: None
self._load_data_graph()
self._load_model()
self._load_optimizer()
self._start_session()
@abstractmethod
def _load_data_graph(self):
pass
@abstractmethod
def _load_model(self):
pass
@abstractmethod
def _load_optimizer(self):
pass
def _start_session(self):
print("Starting tensorflow session...")
self.sess.run(tf.global_variables_initializer())
# initialize the saver node
# print tf.GraphKeys.GLOBAL_VARIABLES
self.saver = tf.train.Saver(tf.global_variables())
# get the latest checkpoint
last_checkpoint_path = self.checkpointSys.get_last_checkpoint()
if last_checkpoint_path is not None:
print('Previous saved tensorflow objects found... Extracting...')
# restore the tensorflow variables
self.saver.restore(self.sess, last_checkpoint_path)
print('Extraction Complete. Moving Forward....')
@abstractmethod
def fit(self):
pass
def _index2sentence(self, list_):
# Convert the indexed sentence back to the actual sentence; returns a string
rev_map = self.mapper_dict['rev_map'] # rev_map is reverse mapping from index in vocabulary to actual word
sentence = ""
for entry in list_:
if entry != 0:
sentence += (rev_map[entry] + " ")
return sentence
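# Example (hypothetical mapping, illustration only): with
# rev_map = {4: 'the', 9: 'cat'} and 0 used as padding,
# self._index2sentence([4, 9, 0, 0]) returns "the cat ".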
def store_test_predictions(self, prediction_id='_final'):
# Stores the test predictions in a CSV file
# param prediction_id: A simple id appended to the name of the summary for uniqueness
# prediction id is usually the step count
print('Storing predictions on Test Data...')
document = []
true_summary = []
generated_summary = []
for i in range(self.test_size):
if not self.checkpointSys.is_output_file_present():
document.append(self._index2sentence(self.test_review[i]))
true_summary.append(self._index2sentence(self.true_summary[i]))
if i < (self.test_batch_size * (self.test_size // self.test_batch_size)):
generated_summary.append(self._index2sentence(self.predicted_test_summary[i]))
else:
generated_summary.append('')
prediction_nm = 'generated_summary' + prediction_id
if self.checkpointSys.is_output_file_present():
df = pd.read_csv(self.checkpointSys.get_result_location(), header=0)
df[prediction_nm] = np.array(generated_summary)
else:
df = pd.DataFrame()
df['document'] = np.array(document)
df['true_summary'] = np.array(true_summary)
df[prediction_nm] = np.array(generated_summary)
df.to_csv(self.checkpointSys.get_result_location(), index=False)
print('Stored the predictions. Moving Forward')
if prediction_id == '_final':
print('All done. Exiting..')
print('Exited')
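# Illustration only: a minimal, hypothetical sketch (kept as comments) of how
# a concrete subclass is expected to drive NeuralNet.  The subclass name and
# method bodies are assumptions, not part of this project.
#
# class ToySummarizer(NeuralNet):
#     def _load_data(self): ...          # read documents/summaries from disk
#     def _split_train_tst(self): ...    # build train/test splits
#     def _load_data_graph(self): ...    # create input placeholders
#     def _load_model(self): ...         # build the seq2seq encoder-decoder
#     def _load_optimizer(self): ...     # attach momentum-SGD training op
#     def fit(self): ...                 # train, then store_test_predictions()
#
# net = ToySummarizer(...)
# net.set_parameters(train_batch_size=32, test_batch_size=32,
#                    memory_dim=128, learning_rate=0.05)
# net.begin_session()
# net.form_model_graph()
# net.fit()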
|
mit
|
themrmax/scikit-learn
|
examples/applications/plot_stock_market.py
|
6
|
9353
|
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters and can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels so as to minimize overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
def quotes_historical_google(symbol, date1, date2):
"""Get the historical data from Google finance.
Parameters
----------
symbol : str
Ticker symbol to query for, for example ``"DELL"``.
date1 : datetime.datetime
Start date.
date2 : datetime.datetime
End date.
Returns
-------
X : array
The columns are ``date`` -- datetime, ``open``, ``high``,
``low``, ``close`` and ``volume`` of type float.
"""
params = urlencode({
'q': symbol,
'startdate': date1.strftime('%b %d, %Y'),
'enddate': date2.strftime('%b %d, %Y'),
'output': 'csv'
})
url = 'http://www.google.com/finance/historical?' + params
with urlopen(url) as response:
dtype = {
'names': ['date', 'open', 'high', 'low', 'close', 'volume'],
'formats': ['object', 'f4', 'f4', 'f4', 'f4', 'f4']
}
converters = {0: lambda s: datetime.strptime(s.decode(), '%d-%b-%y')}
return np.genfromtxt(response, delimiter=',', skip_header=1,
dtype=dtype, converters=converters,
missing_values='-', filling_values=-1)
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime(2003, 1, 1)
d2 = datetime(2008, 1, 1)
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'McDonald\'s',
'PEP': 'Pepsi',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas Instruments',
'XRX': 'Xerox',
'WMT': 'Wal-Mart',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [
quotes_historical_google(symbol, d1, d2) for symbol in symbols
]
close_prices = np.stack([q['close'] for q in quotes])
open_prices = np.stack([q['open'] for q in quotes])
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
|
bsd-3-clause
|
andaag/scikit-learn
|
sklearn/manifold/tests/test_locally_linear.py
|
232
|
4761
|
from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that each row of weights sums to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
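# (Orientation note, hedged: barycenter_kneighbors_graph(X, k) returns a
# matrix whose row i holds the weights that reconstruct X[i] as an affine
# combination of its k nearest neighbours -- the weights in each row sum to
# one, which is why np.dot(A.toarray(), X) approximates X above.)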
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
|
bsd-3-clause
|
cloudera/ibis
|
ibis/backends/tests/test_vectorized_udf.py
|
1
|
11404
|
import pytest
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
from ibis.expr.window import window
from ibis.udf.vectorized import analytic, elementwise, reduction
pytestmark = pytest.mark.udf
@elementwise(input_type=[dt.double], output_type=dt.double)
def add_one(s):
return s + 1
@analytic(input_type=[dt.double], output_type=dt.double)
def calc_zscore(s):
return (s - s.mean()) / s.std()
@reduction(input_type=[dt.double], output_type=dt.double)
def calc_mean(s):
return s.mean()
@elementwise(
input_type=[dt.double],
output_type=dt.Struct(['col1', 'col2'], [dt.double, dt.double]),
)
def add_one_struct(v):
return v + 1, v + 2
@analytic(
input_type=[dt.double, dt.double],
output_type=dt.Struct(['demean', 'demean_weight'], [dt.double, dt.double]),
)
def demean_struct(v, w):
return v - v.mean(), w - w.mean()
@reduction(
input_type=[dt.double, dt.double],
output_type=dt.Struct(['mean', 'mean_weight'], [dt.double, dt.double]),
)
def mean_struct(v, w):
return v.mean(), w.mean()
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf(backend, alltypes, df):
result = add_one(alltypes['double_col']).execute()
expected = add_one.func(df['double_col'])
backend.assert_series_equal(result, expected, check_names=False)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_mutate(backend, alltypes, df):
expr = alltypes.mutate(incremented=add_one(alltypes['double_col']))
result = expr.execute()
expected = df.assign(incremented=add_one.func(df['double_col']))
backend.assert_series_equal(result['incremented'], expected['incremented'])
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_analytic_udf(backend, alltypes, df):
result = calc_zscore(alltypes['double_col']).execute()
expected = calc_zscore.func(df['double_col'])
backend.assert_series_equal(result, expected, check_names=False)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_analytic_udf_mutate(backend, alltypes, df):
expr = alltypes.mutate(zscore=calc_zscore(alltypes['double_col']))
result = expr.execute()
expected = df.assign(zscore=calc_zscore.func(df['double_col']))
backend.assert_series_equal(result['zscore'], expected['zscore'])
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_reduction_udf(backend, alltypes, df):
result = calc_mean(alltypes['double_col']).execute()
expected = df['double_col'].agg(calc_mean.func)
assert result == expected
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_output_type_in_list_invalid(backend, alltypes, df):
# Test that an error is raised if UDF output type is wrapped in a list
with pytest.raises(
com.IbisTypeError,
match="The output type of a UDF must be a single datatype.",
):
@elementwise(input_type=[dt.double], output_type=[dt.double])
def add_one(s):
return s + 1
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_valid_kwargs(backend, alltypes, df):
# Test different forms of UDF definition with keyword arguments
@elementwise(input_type=[dt.double], output_type=dt.double)
def foo1(v):
# Basic UDF with kwargs
return v + 1
@elementwise(input_type=[dt.double], output_type=dt.double)
def foo2(v, *, amount):
# UDF with keyword only arguments
return v + amount
@elementwise(input_type=[dt.double], output_type=dt.double)
def foo3(v, **kwargs):
# UDF with kwargs
return v + kwargs.get('amount', 1)
result = alltypes.mutate(
v1=foo1(alltypes['double_col']),
v2=foo2(alltypes['double_col'], amount=1),
v3=foo2(alltypes['double_col'], amount=2),
v4=foo3(alltypes['double_col']),
v5=foo3(alltypes['double_col'], amount=2),
v6=foo3(alltypes['double_col'], amount=3),
).execute()
expected = df.assign(
v1=df['double_col'] + 1,
v2=df['double_col'] + 1,
v3=df['double_col'] + 2,
v4=df['double_col'] + 1,
v5=df['double_col'] + 2,
v6=df['double_col'] + 3,
)
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_valid_args(backend, alltypes, df):
# Test different forms of UDF definition with *args
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo1(*args):
return args[0] + len(args[1])
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo2(v, *args):
return v + len(args[0])
result = alltypes.mutate(
v1=foo1(alltypes['double_col'], alltypes['string_col']),
v2=foo2(alltypes['double_col'], alltypes['string_col']),
).execute()
expected = df.assign(
v1=df['double_col'] + len(df['string_col']),
v2=df['double_col'] + len(df['string_col']),
)
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_valid_args_and_kwargs(backend, alltypes, df):
# Test UDFs with both *args and keyword arguments
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo1(*args, amount):
# UDF with *args and a keyword-only argument
return args[0] + len(args[1]) + amount
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo2(*args, **kwargs):
# UDF with *args and **kwargs
return args[0] + len(args[1]) + kwargs.get('amount', 1)
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo3(v, *args, amount):
# UDF with an explicit positional argument, *args, and a keyword-only
# argument
return v + len(args[0]) + amount
@elementwise(input_type=[dt.double, dt.string], output_type=dt.double)
def foo4(v, *args, **kwargs):
# UDF with an explicit positional argument, *args, and **kwargs
return v + len(args[0]) + kwargs.get('amount', 1)
result = alltypes.mutate(
v1=foo1(alltypes['double_col'], alltypes['string_col'], amount=2),
v2=foo2(alltypes['double_col'], alltypes['string_col'], amount=2),
v3=foo3(alltypes['double_col'], alltypes['string_col'], amount=2),
v4=foo4(alltypes['double_col'], alltypes['string_col'], amount=2),
).execute()
expected = df.assign(
v1=df['double_col'] + len(df['string_col']) + 2,
v2=df['double_col'] + len(df['string_col']) + 2,
v3=df['double_col'] + len(df['string_col']) + 2,
v4=df['double_col'] + len(df['string_col']) + 2,
)
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_invalid_kwargs(backend, alltypes):
# Test that defining a UDF with a non-column argument that is not a
# keyword argument raises an error
with pytest.raises(TypeError, match=".*must be defined as keyword only.*"):
@elementwise(input_type=[dt.double], output_type=dt.double)
def foo1(v, amount):
return v + 1
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_destruct(backend, alltypes):
result = alltypes.mutate(
add_one_struct(alltypes['double_col']).destructure()
).execute()
expected = alltypes.mutate(
col1=alltypes['double_col'] + 1, col2=alltypes['double_col'] + 2,
).execute()
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas', 'pyspark'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_named_destruct(backend, alltypes):
"""Test error when assigning name to a destruct column."""
with pytest.raises(
com.ExpressionError, match=r".*Cannot name a destruct.*"
):
alltypes.mutate(
new_struct=add_one_struct(alltypes['double_col']).destructure()
)
@pytest.mark.only_on_backends(['pyspark'])
@pytest.mark.xfail_unsupported
def test_elementwise_udf_struct(backend, alltypes):
result = alltypes.mutate(
new_col=add_one_struct(alltypes['double_col'])
).execute()
result = result.assign(
col1=result['new_col'].apply(lambda x: x[0]),
col2=result['new_col'].apply(lambda x: x[1]),
)
result = result.drop('new_col', axis=1)
expected = alltypes.mutate(
col1=alltypes['double_col'] + 1, col2=alltypes['double_col'] + 2,
).execute()
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
def test_analytic_udf_destruct(backend, alltypes):
w = window(preceding=None, following=None, group_by='year')
result = alltypes.mutate(
demean_struct(alltypes['double_col'], alltypes['int_col'])
.over(w)
.destructure()
).execute()
expected = alltypes.mutate(
demean=alltypes['double_col'] - alltypes['double_col'].mean().over(w),
demean_weight=alltypes['int_col'] - alltypes['int_col'].mean().over(w),
).execute()
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
def test_reduction_udf_destruct_groupby(backend, alltypes):
result = (
alltypes.groupby('year')
.aggregate(
mean_struct(
alltypes['double_col'], alltypes['int_col']
).destructure()
)
.execute()
)
expected = (
alltypes.groupby('year')
.aggregate(
mean=alltypes['double_col'].mean(),
mean_weight=alltypes['int_col'].mean(),
)
.execute()
)
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
def test_reduction_udf_destruct_no_groupby(backend, alltypes):
result = alltypes.aggregate(
mean_struct(alltypes['double_col'], alltypes['int_col']).destructure()
).execute()
expected = alltypes.aggregate(
mean=alltypes['double_col'].mean(),
mean_weight=alltypes['int_col'].mean(),
).execute()
backend.assert_frame_equal(result, expected)
@pytest.mark.only_on_backends(['pandas'])
# TODO - udf - #2553
@pytest.mark.xfail_backends(['dask'])
def test_reduction_udf_destruct_window(backend, alltypes):
win = window(
preceding=ibis.interval(hours=2),
following=0,
group_by='year',
order_by='timestamp_col',
)
result = alltypes.mutate(
mean_struct(alltypes['double_col'], alltypes['int_col'])
.over(win)
.destructure()
).execute()
expected = alltypes.mutate(
mean=alltypes['double_col'].mean().over(win),
mean_weight=alltypes['int_col'].mean().over(win),
).execute()
backend.assert_frame_equal(result, expected)
|
apache-2.0
|
imperial-genomics-facility/data-management-python
|
test/dbadaptor/projectadaptor_test.py
|
1
|
6564
|
import os, unittest
import pandas as pd
from sqlalchemy import create_engine
from igf_data.igfdb.igfTables import Base, Project, Project_attribute, Sample
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.igfdb.projectadaptor import ProjectAdaptor
from igf_data.igfdb.useradaptor import UserAdaptor
from igf_data.igfdb.sampleadaptor import SampleAdaptor
from igf_data.utils.dbutils import read_dbconf_json
class Projectadaptor_test1(unittest.TestCase):
def setUp(self):
self.dbconfig='data/dbconfig.json'
dbparam=read_dbconf_json(self.dbconfig)
base=BaseAdaptor(**dbparam)
self.engine=base.engine
self.dbname=dbparam['dbname']
Base.metadata.create_all(self.engine)
self.session_class=base.get_session_class()
project_data=[{'project_igf_id':'IGFP0001_test_22-8-2017_rna',
'project_name':'test_22-8-2017_rna',
'description':'Its project 1',
'project_deadline':'Before August 2017',
'comments':'Some samples are treated with drug X',
},
{'project_igf_id':'IGFP0002_test_22-8-2017_rna',
'project_name':'test_23-8-2017_rna',
'description':'Its project 2',
'project_deadline':'Before August 2017',
'comments':'Some samples are treated with drug X',
}]
base.start_session()
pa=ProjectAdaptor(**{'session':base.session})
pa.store_project_and_attribute_data(data=project_data)
sa=SampleAdaptor(**{'session': base.session})
sample_data=[{'sample_igf_id':'IGFS001','project_igf_id':'IGFP0001_test_22-8-2017_rna',},
{'sample_igf_id':'IGFS002','project_igf_id':'IGFP0001_test_22-8-2017_rna',},
{'sample_igf_id':'IGFS003','project_igf_id':'IGFP0001_test_22-8-2017_rna',},
{'sample_igf_id':'IGFS004','project_igf_id':'IGFP0001_test_22-8-2017_rna','status':'FAILED',},
]
sa.store_sample_and_attribute_data(data=sample_data)
base.close_session()
def tearDown(self):
Base.metadata.drop_all(self.engine)
os.remove(self.dbname)
def test_fetch_project_samples(self):
pa=ProjectAdaptor(**{'session_class':self.session_class})
pa.start_session()
sample1=pa.fetch_project_samples(project_igf_id='IGFP0001_test_22-8-2017_rna',output_mode='dataframe')
self.assertEqual(len(sample1.index),3)
sample2=pa.fetch_project_samples(project_igf_id='IGFP0002_test_22-8-2017_rna',output_mode='dataframe')
self.assertEqual(len(sample2.index),0)
sample3=pa.fetch_project_samples(project_igf_id='IGFP0001_test_22-8-2017_rna',
only_active=False,
output_mode='dataframe')
self.assertEqual(len(sample3.index),4)
pa.close_session()
def test_count_project_samples(self):
pa=ProjectAdaptor(**{'session_class':self.session_class})
pa.start_session()
sample1=pa.count_project_samples(project_igf_id='IGFP0001_test_22-8-2017_rna')
self.assertEqual(sample1,3)
sample2=pa.count_project_samples(project_igf_id='IGFP0002_test_22-8-2017_rna')
self.assertEqual(sample2,0)
sample3=pa.count_project_samples(project_igf_id='IGFP0001_test_22-8-2017_rna',
only_active=False)
self.assertEqual(sample3,4)
pa.close_session()
class Projectadaptor_test2(unittest.TestCase):
def setUp(self):
self.dbconfig='data/dbconfig.json'
dbparam=read_dbconf_json(self.dbconfig)
base=BaseAdaptor(**dbparam)
self.engine=base.engine
self.dbname=dbparam['dbname']
Base.metadata.create_all(self.engine)
self.session_class=base.get_session_class()
project_data=[{'project_igf_id':'IGFP0001_test_22-8-2017_rna',
'project_name':'test_22-8-2017_rna',
'description':'Its project 1',
'project_deadline':'Before August 2017',
'comments':'Some samples are treated with drug X',
},
{'project_igf_id':'IGFP0002_test_22-8-2017_rna',
'project_name':'test_23-8-2017_rna',
'description':'Its project 2',
'project_deadline':'Before August 2017',
'comments':'Some samples are treated with drug X'}]
user_data=[{'name':'UserA',
'email_id':'[email protected]',
'username':'usera'}]
project_user_data=[{'project_igf_id': 'IGFP0001_test_22-8-2017_rna',
'email_id': '[email protected]',
'data_authority':True},
{'project_igf_id': 'IGFP0002_test_22-8-2017_rna',
'email_id': '[email protected]'}]
base.start_session()
ua=UserAdaptor(**{'session':base.session})
ua.store_user_data(data=user_data)
pa=ProjectAdaptor(**{'session':base.session})
pa.store_project_and_attribute_data(data=project_data)
pa.assign_user_to_project(data=project_user_data)
base.close_session()
def tearDown(self):
Base.metadata.drop_all(self.engine)
os.remove(self.dbname)
def test_check_data_authority_for_project(self):
pa=ProjectAdaptor(**{'session_class':self.session_class})
pa.start_session()
pa_results1=pa.check_data_authority_for_project(project_igf_id='IGFP0001_test_22-8-2017_rna')
self.assertTrue(pa_results1)
pa_results2=pa.check_data_authority_for_project(project_igf_id='IGFP0002_test_22-8-2017_rna')
self.assertFalse(pa_results2)
pa.close_session()
def test_fetch_data_authority_for_project(self):
pa=ProjectAdaptor(**{'session_class':self.session_class})
pa.start_session()
pa_results1=pa.fetch_data_authority_for_project(project_igf_id='IGFP0001_test_22-8-2017_rna')
self.assertEqual(pa_results1.email_id,'[email protected]')
pa_results2=pa.fetch_data_authority_for_project(project_igf_id='IGFP0002_test_22-8-2017_rna')
self.assertEqual(pa_results2, None)
pa.close_session()
def test_fetch_all_project_igf_ids(self):
pa = ProjectAdaptor(**{'session_class':self.session_class})
pa.start_session()
project_list = pa.fetch_all_project_igf_ids()
pa.close_session()
self.assertTrue('IGFP0002_test_22-8-2017_rna' in project_list['project_igf_id'].values)
self.assertTrue('IGFP0001_test_22-8-2017_rna' in project_list['project_igf_id'].values)
self.assertEqual(len(project_list['project_igf_id'].values), 2)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
jorge2703/scikit-learn
|
sklearn/utils/validation.py
|
67
|
24013
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
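# Hypothetical usage sketch (helper name is illustrative): integer arrays are
# upcast to the matching float dtype, while float arrays are returned as-is
# when copy=False.
def _example_as_float_array_usage():
    ints = np.array([1, 2, 3], dtype=np.int32)
    assert as_float_array(ints).dtype == np.float32      # int32 -> float32
    floats = np.array([1.0, 2.0])
    assert as_float_array(floats, copy=False) is floats  # no conversion, no copy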
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2-d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
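# Hypothetical usage sketch (helper name is illustrative): a nested list is
# coerced to a 2-d ndarray, and non-finite values are rejected by default.
def _example_check_array_usage():
    X = check_array([[1, 2], [3, 4]])
    assert X.shape == (2, 2)
    try:
        check_array([[1.0, np.inf]])    # force_all_finite=True by default
    except ValueError:
        pass
    return X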
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
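# Hypothetical usage sketch (helper name is illustrative): X is validated as
# 2-d and a column-vector y is ravelled to 1-d (with a DataConversionWarning,
# silenced here).
def _example_check_X_y_usage():
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DataConversionWarning)
        X, y = check_X_y([[1.0], [2.0], [3.0]], [[0], [1], [0]])
    assert X.shape == (3, 1) and y.shape == (3,)
    return X, y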
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
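# Hypothetical usage sketch (helper name is illustrative): ints become fresh
# RandomState objects, existing RandomState objects pass through, and None
# yields the global numpy singleton.
def _example_check_random_state_usage():
    rng = check_random_state(0)
    assert isinstance(rng, np.random.RandomState)
    assert check_random_state(rng) is rng
    assert isinstance(check_random_state(None), np.random.RandomState)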
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
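# Hypothetical usage sketch (helper name is illustrative): a non-symmetric
# square matrix is symmetrized by averaging with its transpose (the warning
# is silenced here).
def _example_check_symmetric_usage():
    a = np.array([[0.0, 1.0], [2.0, 0.0]])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        sym = check_symmetric(a)
    assert np.allclose(sym, [[0.0, 1.5], [1.5, 0.0]])
    return sym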
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
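# Hypothetical usage sketch (the estimator and helper names are illustrative):
# the check raises NotFittedError until the fitted attribute exists.
def _example_check_is_fitted_usage():
    class _DummyEstimator(object):
        def fit(self):
            self.coef_ = 1.0
            return self
    est = _DummyEstimator()
    try:
        check_is_fitted(est, 'coef_')      # not fitted yet
    except NotFittedError:
        pass
    check_is_fitted(est.fit(), 'coef_')    # passes once coef_ is set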
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
bsd-3-clause
|
dyno/LMK
|
lmk/test/test_cache.py
|
1
|
4051
|
import unittest
from tempfile import TemporaryDirectory
from pandas_datareader.data import DataReader
from pandas import to_datetime
from numpy import dtype
from lmk.utils import env
from lmk.cache import Cache
def date(s):
return to_datetime(s).date()
DS = "google"
class CacheTestCase(unittest.TestCase):
"""Tests for `lmk.cache`."""
def setUp(self):
# env.logger.setLevel(logging.WARN)
self.symbol = "TSLA"
self.start = "2015-04-01"
self.end = "2015-06-30"
self.h = DataReader(self.symbol, DS, self.start, self.end)
def test_cache(self):
with TemporaryDirectory(prefix="lmk.") as tmpdir:
cache = Cache(tmpdir)
self.assertTrue(list(cache.range.columns) == ["start", "end"])
self.assertEqual(cache.range.dtypes.loc["start"], dtype("<M8[ns]"))
def cache_range():
r = cache.range.loc[self.symbol]
return r["start"].date(), r["end"].date()
# initial put
cache.put(self.symbol, date(self.start), date(self.end), self.h)
self.assertEqual(cache.range.dtypes.loc["end"], dtype("<M8[ns]"))
self.assertEqual(cache_range(), (date(self.start), date(self.end)))
# no data cached for the symbol.
start, end = "2015-01-01", "2015-01-31"
h = cache.get("NONEXIST", date(start), date(end))
self.assertTrue(h is None)
# on the left, no overlap
start, end = "2015-01-01", "2015-01-31"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is None)
self.assertEqual(cache_range(), (date(self.start), date(self.end)))
h1 = DataReader(self.symbol, DS, start, end)
cache.put(self.symbol, date(start), date(end), h1)
self.assertEqual(cache_range(), (date(self.start), date(self.end)))
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is None) # only the most recent range is saved.
# on the right, no overlap
start, end = "2016-01-01", "2016-05-31"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is None)
h1 = DataReader(self.symbol, DS, start, end)
cache.put(self.symbol, date(start), date(end), h1)
self.assertEqual(cache_range(), (date(start), date(end)))
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is not None) # only the most recent range is saved.
# overlap on the left
start, end = "2015-12-01", "2016-03-31"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is None)
h1 = DataReader(self.symbol, DS, start, end)
cache.put(self.symbol, date(start), date(end), h1)
self.assertEqual(cache_range(), (date(start), date("2016-05-31")))
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is not None) # cache extended
# overlap on the right
start, end = "2016-04-01", "2016-06-30"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is None)
h1 = DataReader(self.symbol, DS, start, end)
cache.put(self.symbol, date(start), date(end), h1)
self.assertEqual(cache_range(), (date("2015-12-01"), date("2016-06-30")))
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is not None) # cache extended
# hit - part
start, end = "2016-01-01", "2016-05-31"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is not None)
# hit - full
start, end = "2015-12-01", "2016-06-30"
h = cache.get(self.symbol, date(start), date(end))
self.assertTrue(h is not None)
if __name__ == "__main__":
unittest.main()
|
mit
|
umuzungu/zipline
|
zipline/data/us_equity_pricing.py
|
1
|
42320
|
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod, abstractproperty
from errno import ENOENT
from functools import partial
from os import remove
from os.path import exists
import sqlite3
import warnings
from bcolz import (
carray,
ctable,
)
from collections import namedtuple
import logbook
import numpy as np
from numpy import (
array,
int64,
float64,
full,
iinfo,
integer,
issubdtype,
nan,
uint32,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
read_csv,
Timestamp,
NaT,
isnull,
)
from pandas.tslib import iNaT
from six import (
iteritems,
with_metaclass,
viewkeys,
)
from zipline.utils.functional import apply
from zipline.utils.preprocess import call
from zipline.utils.input_validation import (
coerce_string,
preprocess,
expect_element,
verify_indices_all_unique,
)
from zipline.utils.sqlite_utils import group_into_chunks
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
from ._adjustments import load_adjustments_from_sqlite
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
)
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': float,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'amount': float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'payment_sid': integer,
'ratio': float,
}
UINT32_MAX = iinfo(uint32).max
class NoDataOnDate(Exception):
"""
    Raised when a spot price cannot be found for the sid and date.
"""
pass
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
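# Hypothetical usage sketch (the DataFrame and helper name are illustrative):
# a volume that cannot fit into a uint32 is zeroed out under the 'warn'
# behavior, while in-range values are left untouched.
def _example_winsorise_uint32():
    df = DataFrame({'volume': [100.0, float(UINT32_MAX) + 1.0]})
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        out = winsorise_uint32(df, 'warn', 'volume')
    return out    # out.volume == [100.0, 0.0]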
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def to_ctable(raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)
processed = (raw_data[list(OHLC)] * 1000).astype('uint32')
dates = raw_data.index.values.astype('datetime64[s]')
check_uint32_safe(dates.max().view(np.int64), 'day')
processed['day'] = dates.astype('uint32')
processed['volume'] = raw_data.volume.astype('uint32')
return ctable.fromdataframe(processed)
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can be
read efficiently by BcolzDailyOHLCVReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : pandas.DatetimeIndex
Calendar to use to compute asset calendar offsets.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarReader
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, filename, calendar):
self._filename = filename
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(self,
data,
assets=None,
show_progress=False,
invalid_data_behavior='warn'):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
((sid, to_ctable(df, invalid_data_behavior)) for sid, df in data),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
calendar = self._calendar
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError('unknown asset id %r' % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(
full((nrows,), asset_id, dtype='uint32'),
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
            # offset is used for output alignment by the reader.
asset_first_day = table['day'][0]
calendar_offset[asset_key] = calendar.get_loc(
Timestamp(asset_first_day, unit='s', tz='UTC'),
)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode='w',
)
full_table.attrs['first_trading_day'] = (
earliest_date // 1e6
if earliest_date is not None else
iNaT
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar'] = calendar.asi8.tolist()
full_table.flush()
return full_table
class DailyBarReader(with_metaclass(ABCMeta)):
"""
    Reader for OHLCV pricing data at a daily frequency.
"""
@abstractmethod
def load_raw_arrays(self, columns, start_date, end_date, assets):
pass
@abstractmethod
def spot_price(self, sid, day, colname):
pass
@abstractproperty
def last_available_dt(self):
pass
class BcolzDailyBarReader(DailyBarReader):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
A Bcolz CTable is comprised of Columns and Attributes.
Columns
-------
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
The table is built to represent a long time range of data, e.g. ten years
of equity data, so the lengths of each asset block is not equal to each
other. The blocks are clipped to the known start and end date of each asset
to cut down on the number of empty values that would need to be included to
make a regular/cubic dataset.
    When read across columns, rows of open, high, low, close, and volume that
    share the same index represent the same asset and day.
Parameters
----------
table : bcolz.ctable
        The ctable containing the pricing data, with attrs corresponding to the
Attributes list below.
    read_all_threshold : int
        The number of equities at which the read strategy switches: below
        this threshold, data is read by slicing the carray per asset; at or
        above it, the data for all assets is pulled into memory and then
        indexed into for each day and asset pair.
        Used to tune performance of reads when using a small or large number
        of equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
calendar : list[int64]
Calendar used to compute offsets, in asi8 format (ns since EPOCH).
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode='r')
@lazyval
def _calendar(self):
return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in iteritems(
self._table.attrs['first_row'],
)
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in iteritems(
self._table.attrs['last_row'],
)
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in iteritems(
self._table.attrs['calendar_offset'],
)
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(
self._table.attrs['first_trading_day'],
unit='ms',
tz='UTC'
)
except KeyError:
return None
@property
def last_available_dt(self):
return self._calendar[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self._calendar.get_loc(start_date)
end_idx = self._calendar.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
A name of a OHLCV carray in the daily_bar_table
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col('volume')
if day >= asset.end_date:
# go back to one day before the asset ended
search_day = self._calendar[
self._calendar.searchsorted(asset.end_date) - 1
]
else:
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataOnDate:
return None
if volumes[ix] != 0:
return search_day
prev_day_ix = self._calendar.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self._calendar[prev_day_ix]
else:
return None
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
"""
try:
day_loc = self._calendar.get_loc(day)
except:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self._calendar))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataOnDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataOnDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def spot_price(self, sid, day, colname):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
ix = self.sid_day_index(sid, day)
price = self._spot_col(colname)[ix]
if price == 0:
return -1
if colname != 'volume':
return price * 0.001
else:
return price
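# Hypothetical worked example (helper name is illustrative): prices are stored
# as uint32 thousandths of a dollar by to_ctable (value * 1000) and scaled
# back by spot_price (value * 0.001), matching PRICE_ADJUSTMENT_FACTOR.
def _example_price_scaling():
    stored = int(123.5 * 1000)                  # written as 123500
    assert abs(stored * 0.001 - 123.5) < 1e-9   # read back as 123.5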
class PanelDailyBarReader(DailyBarReader):
"""
Reader for data passed as Panel.
DataPanel Structure
-------
items : Int64Index
Asset identifiers. Must be unique.
major_axis : DatetimeIndex
        Dates for data provided by the Panel. Must be unique.
minor_axis : ['open', 'high', 'low', 'close', 'volume']
Price attributes. Must be unique.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
panel : pd.Panel
The panel from which to read OHLCV data.
first_trading_day : pd.Timestamp
The first trading day in the dataset.
"""
@preprocess(panel=call(verify_indices_all_unique))
def __init__(self, calendar, panel):
panel = panel.copy()
if 'volume' not in panel.minor_axis:
# Fake volume if it does not exist.
panel.loc[:, :, 'volume'] = int(1e9)
self.first_trading_day = panel.major_axis[0]
self._calendar = calendar
self.panel = panel
@property
def last_available_dt(self):
return self._calendar[-1]
def load_raw_arrays(self, columns, start_date, end_date, assets):
columns = list(columns)
cal = self._calendar
index = cal[cal.slice_indexer(start_date, end_date)]
shape = (len(index), len(assets))
results = []
for col in columns:
outbuf = zeros(shape=shape)
for i, asset in enumerate(assets):
data = self.panel.loc[asset, start_date:end_date, col]
data = data.reindex_axis(index).values
outbuf[:, i] = data
results.append(outbuf)
return results
def spot_price(self, sid, day, colname):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
return self.panel.loc[sid, day, colname]
def get_last_traded_dt(self, sid, dt):
"""
Parameters
----------
sid : int
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
while dt in self.panel.major_axis:
freq = self.panel.major_axis.freq
if not isnull(self.panel.loc[sid, dt, 'close']):
return dt
dt -= freq
else:
return NaT
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
daily_bar_reader : BcolzDailyBarReader
Daily bar reader to use for dividend writes.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
def __init__(self,
conn_or_path,
daily_bar_reader,
calendar,
overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite and exists(conn_or_path):
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
self.uri = conn_or_path
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._daily_bar_reader = daily_bar_reader
self._calendar = calendar
def _write(self, tablename, expected_dtypes, frame):
if frame is None or frame.empty:
# keeping the dtypes correct for empty frames is not easy
frame = DataFrame(
np.array([], dtype=list(expected_dtypes.items())),
)
else:
if frozenset(frame.columns) != viewkeys(expected_dtypes):
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
set(expected_dtypes),
frame.columns.tolist(),
)
)
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column"
" '{colname}', but got '{actual}'.".format(
expected=expected,
colname=colname,
actual=actual,
),
)
frame.to_sql(
tablename,
self.conn,
if_exists='append',
chunksize=50000,
)
def write_frame(self, tablename, frame):
if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename,
SQLITE_ADJUSTMENT_TABLENAMES,
)
)
if not (frame is None or frame.empty):
frame = frame.copy()
frame['effective_date'] = frame['effective_date'].values.astype(
'datetime64[s]',
).astype('int64')
return self._write(
tablename,
SQLITE_ADJUSTMENT_COLUMN_DTYPES,
frame,
)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
return self._write(
'dividend_payouts',
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def write_stock_dividend_payouts(self, frame):
return self._write(
'stock_dividend_payouts',
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None:
return DataFrame(np.array(
[],
dtype=[
('sid', uint32),
('effective_date', uint32),
('ratio', float64),
],
))
ex_dates = dividends.ex_date.values
sids = dividends.sid.values
amounts = dividends.amount.values
ratios = full(len(amounts), nan)
daily_bar_reader = self._daily_bar_reader
effective_dates = full(len(amounts), -1, dtype=int64)
calendar = self._calendar
for i, amount in enumerate(amounts):
sid = sids[i]
ex_date = ex_dates[i]
day_loc = calendar.get_loc(ex_date, method='bfill')
prev_close_date = calendar[day_loc - 1]
try:
prev_close = daily_bar_reader.spot_price(
sid, prev_close_date, 'close')
if prev_close != 0.0:
ratio = 1.0 - amount / prev_close
ratios[i] = ratio
# only assign effective_date when data is found
effective_dates[i] = ex_date
except NoDataOnDate:
logger.warn("Couldn't compute ratio for dividend %s" % {
'sid': sid,
'ex_date': ex_date,
'amount': amount,
})
continue
# Create a mask to filter out indices in the effective_date, sid, and
# ratio vectors for which a ratio was not calculable.
effective_mask = effective_dates != -1
effective_dates = effective_dates[effective_mask]
effective_dates = effective_dates.astype('datetime64[ns]').\
astype('datetime64[s]').astype(uint32)
sids = sids[effective_mask]
ratios = ratios[effective_mask]
return DataFrame({
'sid': sids,
'effective_date': effective_dates,
'ratio': ratios,
})
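    # Hypothetical worked example (helper name is illustrative): the ratio
    # formula above in isolation. A $1.00 dividend against a $100.00 close on
    # the day before the ex_date gives 1.0 - 1.0 / 100.0 == 0.99, so
    # pre-ex-date prices are scaled down by one percent.
    def _example_dividend_ratio(self, amount=1.0, prev_close=100.0):
        return 1.0 - amount / prev_close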
def _write_dividends(self, dividends):
if dividends is None:
dividend_payouts = None
else:
dividend_payouts = dividends.copy()
dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['record_date'] = \
dividend_payouts['record_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['declared_date'] = \
dividend_payouts['declared_date'].values.astype('datetime64[s]').\
astype(integer)
dividend_payouts['pay_date'] = \
dividend_payouts['pay_date'].values.astype('datetime64[s]').\
astype(integer)
self.write_dividend_payouts(dividend_payouts)
def _write_stock_dividends(self, stock_dividends):
if stock_dividends is None:
stock_dividend_payouts = None
else:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts['ex_date'] = \
stock_dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['record_date'] = \
stock_dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['declared_date'] = \
stock_dividend_payouts['declared_date'].\
values.astype('datetime64[s]').astype(integer)
stock_dividend_payouts['pay_date'] = \
stock_dividend_payouts['pay_date'].\
values.astype('datetime64[s]').astype(integer)
self.write_stock_dividend_payouts(stock_dividend_payouts)
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios)
def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
)
def close(self):
self.conn.close()
UNPAID_QUERY_TEMPLATE = """
SELECT sid, amount, pay_date from dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
Dividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date'])
UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """
SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
StockDividend = namedtuple(
'StockDividend',
['asset', 'payment_asset', 'ratio', 'pay_date'])
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentWriter
"""
@preprocess(conn=coerce_string(sqlite3.connect))
def __init__(self, conn):
self.conn = conn
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
list(columns),
dates,
assets,
)
def get_adjustments_for_sid(self, table_name, sid):
t = (sid,)
c = self.conn.cursor()
adjustments_for_sid = c.execute(
"SELECT effective_date, ratio FROM %s WHERE sid = ?" %
table_name, t).fetchall()
c.close()
return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]]
for adjustment in
adjustments_for_sid]
def get_dividends_with_ex_date(self, assets, date, asset_finder):
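        # Timestamp.value is expressed in nanoseconds; convert to whole epoch
        # seconds so it matches the integer `ex_date` column stored by the writer.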
seconds = date.value / int(1e9)
c = self.conn.cursor()
divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
div = Dividend(
asset_finder.retrieve_asset(row[0]),
row[1], Timestamp(row[2], unit='s', tz='UTC'))
divs.append(div)
c.close()
return divs
def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
stock_divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
stock_div = StockDividend(
asset_finder.retrieve_asset(row[0]), # asset
asset_finder.retrieve_asset(row[1]), # payment_asset
row[2],
Timestamp(row[3], unit='s', tz='UTC'))
stock_divs.append(stock_div)
c.close()
return stock_divs
|
apache-2.0
|
fredhusser/scikit-learn
|
examples/covariance/plot_outlier_detection.py
|
235
|
3891
|
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
|
bsd-3-clause
|
soulmachine/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
ajaybhat/scikit-image
|
doc/examples/xx_applications/plot_rank_filters.py
|
4
|
20058
|
"""
============
Rank filters
============
Rank filters are non-linear filters using the local gray-level ordering to
compute the filtered value. This ensemble of filters shares a common base: the
local gray-level histogram is computed on the neighborhood of a pixel (defined
by a 2-D structuring element). If the filtered value is taken as the middle
value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
e.g. image smoothing, sharpening
* image pre-processing
e.g. noise reduction, contrast enhancement
* feature extraction
e.g. border detection, isolated point detection
* post-processing
e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the `camera` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from skimage import data
noisy_image = img_as_ubyte(data.camera())
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of grey values')
######################################################################
#
# Noise removal
# =============
#
# Some noise is added to the image, 1% of pixels are randomly set to 255, 1%
# are randomly set to 0. The **median** filter is applied to remove the
# noise.
from skimage.filters.rank import median
from skimage.morphology import disk
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, axes = plt.subplots(2, 2, figsize=(10, 7), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax[0].set_title('Noisy image')
ax[0].axis('off')
ax[0].set_adjustable('box-forced')
ax[1].imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[1].set_title('Median $r=1$')
ax[1].axis('off')
ax[1].set_adjustable('box-forced')
ax[2].imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[2].set_title('Median $r=5$')
ax[2].axis('off')
ax[2].set_adjustable('box-forced')
ax[3].imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[3].set_title('Median $r=20$')
ax[3].axis('off')
ax[3].set_adjustable('box-forced')
######################################################################
#
# The added noise is efficiently removed: since the image defects are small
# (1 pixel wide), a small filter radius is sufficient. As the radius
# increases, objects of larger size are filtered as well, such as the
# camera tripod. The median filter is often used for noise removal because
# it preserves borders, and e.g. salt and pepper noise typically does not
# distort the gray-level.
#
# Image smoothing
# ===============
#
# The example hereunder shows how a local **mean** filter smooths the camera
# man image.
from skimage.filters.rank import mean
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7], sharex=True, sharey=True)
loc_mean = mean(noisy_image, disk(10))
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Local mean $r=10$')
ax2.axis('off')
ax2.set_adjustable('box-forced')
######################################################################
#
# One may be interested in smoothing an image while preserving important
# borders (median filters already achieve this). Here we use the
# **bilateral** filter, which restricts the local neighborhood to pixels
# having a gray-level similar to the central one.
#
# .. note::
#
# A different implementation is available for color images in
# `skimage.filters.denoise_bilateral`.
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, axes = plt.subplots(2, 2, figsize=(10, 7), sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[0].set_adjustable('box-forced')
ax[1].imshow(bilat, cmap=plt.cm.gray)
ax[1].set_title('Bilateral mean')
ax[1].axis('off')
ax[1].set_adjustable('box-forced')
ax[2].imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax[2].axis('off')
ax[2].set_adjustable('box-forced')
ax[3].imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
ax[3].axis('off')
ax[3].set_adjustable('box-forced')
######################################################################
# One can see that the large continuous part of the image (e.g. sky) is
# smoothed whereas other details are preserved.
#
# Contrast enhancement
# ====================
#
# We compare here how the global histogram equalization is applied locally.
#
# The equalized image [2]_ has a roughly linear cumulative distribution
# function for each pixel neighborhood. The local version [3]_ of the
# histogram equalization emphasizes local gray-level variations.
#
# .. [2] http://en.wikipedia.org/wiki/Histogram_equalization
# .. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, axes = plt.subplots(3, 2, figsize=(10, 10))
ax = axes.ravel()
ax[0].imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax[0].axis('off')
ax[1].plot(hist[1][:-1], hist[0], lw=2)
ax[1].set_title('Histogram of gray values')
ax[2].imshow(glob, interpolation='nearest', cmap=plt.cm.gray)
ax[2].axis('off')
ax[3].plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax[3].set_title('Histogram of gray values')
ax[4].imshow(loc, interpolation='nearest', cmap=plt.cm.gray)
ax[4].axis('off')
ax[5].plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax[5].set_title('Histogram of gray values')
######################################################################
# Another way to maximize the number of gray-levels used for an image is to
# apply a local auto-leveling, i.e. the gray-value of a pixel is
# proportionally remapped between local minimum and local maximum.
#
# The following example shows how local auto-level enhances the camera man
# picture.
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7], sharex=True, sharey=True)
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(auto, cmap=plt.cm.gray)
ax2.set_title('Local autolevel')
ax2.axis('off')
ax2.set_adjustable('box-forced')
######################################################################
# This filter is very sensitive to local outliers; see the little white spot
# in the left part of the sky. This is due to a local maximum which is very
# high compared to the rest of the neighborhood. One can moderate this using
# the percentile version of the auto-level filter which uses given
# percentiles (one inferior, one superior) in place of local minimum and
# maximum. The example below illustrates how the percentile parameters
# influence the local auto-level result.
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(7, 8), sharex=True, sharey=True)
plt.gray()
title_list = ['Original',
'auto_level',
'auto-level 0%',
'auto-level 1%',
'auto-level 5%',
'auto-level 10%']
image_list = [image,
loc_autolevel,
loc_perc_autolevel0,
loc_perc_autolevel1,
loc_perc_autolevel2,
loc_perc_autolevel3]
axes_list = axes.ravel()
for i in range(0, len(image_list)):
axes_list[i].imshow(image_list[i], cmap=plt.cm.gray, vmin=0, vmax=255)
axes_list[i].set_title(title_list[i])
axes_list[i].axis('off')
axes_list[i].set_adjustable('box-forced')
######################################################################
# The morphological contrast enhancement filter replaces the central pixel by
# the local maximum if the original pixel value is closest to the local
# maximum, otherwise by the local minimum.
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, axes = plt.subplots(2, 2, figsize=[10, 7], sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[0].set_adjustable('box-forced')
ax[1].imshow(enh, cmap=plt.cm.gray)
ax[1].set_title('Local morphological contrast enhancement')
ax[1].axis('off')
ax[1].set_adjustable('box-forced')
ax[2].imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax[2].axis('off')
ax[2].set_adjustable('box-forced')
ax[3].imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
ax[3].axis('off')
ax[3].set_adjustable('box-forced')
######################################################################
# The percentile version of the local morphological contrast enhancement uses
# percentile *p0* and *p1* instead of the local minimum and maximum.
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, axes = plt.subplots(2, 2, figsize=[10, 7], sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(penh, cmap=plt.cm.gray)
ax[1].set_title('Local percentile morphological\n contrast enhancement')
ax[2].imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax[3].imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
for a in ax:
a.axis('off')
a.set_adjustable('box-forced')
######################################################################
#
# Image threshold
# ===============
#
# The Otsu threshold [4]_ method can be applied locally using the local gray-
# level distribution. In the example below, for each pixel, an "optimal"
# threshold is determined by maximizing the variance between two classes of
# pixels of the local neighborhood defined by a structuring element.
#
# The example compares the local threshold with the global threshold
# `skimage.filters.threshold_otsu`.
#
# .. note::
#
# Local is much slower than global thresholding. A function for global
# Otsu thresholding can be found in : `skimage.filters.threshold_otsu`.
#
# .. [4] http://en.wikipedia.org/wiki/Otsu's_method
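#
# For reference, Otsu's criterion selects the threshold t that maximizes the
# between-class variance of the (here local) gray-level histogram,
# sigma_b^2(t) = w0(t) * w1(t) * (mu0(t) - mu1(t))^2,
# where w0/w1 are the two class probabilities and mu0/mu1 their mean gray
# levels.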
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
ax = axes.ravel()
fig.colorbar(ax[0].imshow(p8, cmap=plt.cm.gray), ax=ax[0])
ax[0].set_title('Original')
fig.colorbar(ax[1].imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax[1])
ax[1].set_title('Local Otsu ($r=%d$)' % radius)
ax[2].imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= local Otsu')
ax[3].imshow(glob_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
for a in ax:
a.axis('off')
a.set_adjustable('box-forced')
######################################################################
# The following example shows how local Otsu thresholding handles a global
# level shift applied to a synthetic image.
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
ax1.imshow(m)
ax1.set_title('Original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(m >= t, interpolation='nearest')
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
ax2.set_adjustable('box-forced')
######################################################################
# Image morphology
# ================
#
# Local maximum and local minimum are the base operators for gray-level
# morphology.
#
# .. note::
#
# `skimage.dilate` and `skimage.erode` are equivalent filters (see below
# for comparison).
#
# Here is an example of the classical morphological gray-level filters:
# opening, closing and morphological gradient.
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, axes = plt.subplots(2, 2, figsize=[10, 7], sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(closing, cmap=plt.cm.gray)
ax[1].set_title('Gray-level closing')
ax[2].imshow(opening, cmap=plt.cm.gray)
ax[2].set_title('Gray-level opening')
ax[3].imshow(grad, cmap=plt.cm.gray)
ax[3].set_title('Morphological gradient')
for a in ax:
a.axis('off')
a.set_adjustable('box-forced')
######################################################################
#
# Feature extraction
# ===================
#
# Local histograms can be exploited to compute local entropy, which is
# related to the local image complexity. Entropy is computed using the base 2
# logarithm, i.e. the filter returns the minimum number of bits needed to
# encode the local gray-level distribution.
#
# `skimage.filters.rank.entropy` returns the local entropy on a given
# structuring element. The following example applies this filter to 8- and
# 16-bit images.
#
# .. note::
#
# to make better use of the available bit depth, the function returns 10x the
# entropy for 8-bit images and 1000x the entropy for 16-bit images.
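#
# For reference, the value reported for each neighborhood is the Shannon
# entropy of its gray-level histogram, H = -sum_i p_i * log2(p_i); a
# perfectly uniform 8-bit neighborhood therefore yields 8 bits (reported as
# 80 because of the 10x scaling mentioned above).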
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True, sharey=True)
fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
ax1.set_adjustable('box-forced')
fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
ax2.set_adjustable('box-forced')
######################################################################
#
# Implementation
# ==============
#
# The central part of the `skimage.filters.rank` filters is built on a sliding window
# that updates the local gray-level histogram. This approach limits the
# algorithm complexity to O(n) where n is the number of image pixels. The
# complexity is also limited with respect to the structuring element size.
#
# In the following we compare the performance of different implementations
# available in `skimage`.
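######################################################################
# As a purely illustrative sketch (not part of `skimage`), the incremental
# histogram update can be written in a few lines for a 1-D 8-bit signal:
# when the window slides by one sample, only the leaving and the entering
# values touch the histogram, so the per-sample cost is bounded by the
# number of gray levels rather than by the window size.

def sliding_median_1d(signal, radius):
    """Median of an 8-bit 1-D signal via an incrementally updated histogram."""
    hist = np.zeros(256, dtype=int)
    out = np.empty_like(signal)
    n = len(signal)
    for i in range(n):
        if i == 0:
            # build the histogram of the first (truncated) window
            for j in range(min(n, radius + 1)):
                hist[signal[j]] += 1
        else:
            # slide by one: drop the sample leaving the window, add the new one
            left, right = i - radius - 1, i + radius
            if left >= 0:
                hist[signal[left]] -= 1
            if right < n:
                hist[signal[right]] += 1
        # the median is the first gray level reaching half the window population
        count, target = 0, (hist.sum() + 1) // 2
        for g in range(256):
            count += hist[g]
            if count >= target:
                out[i] = g
                break
    return out

toy_signal = (np.random.random(64) * 256).astype(np.uint8)
toy_median = sliding_median_1d(toy_signal, radius=3)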
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
######################################################################
# Comparison between
#
# * `filters.rank.maximum`
# * `morphology.dilate`
#
# on increasing structuring element size:
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
######################################################################
# and increasing image size:
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
######################################################################
# Comparison between:
#
# * `filters.rank.median`
# * `scipy.ndimage.percentile`
#
# on increasing structuring element size:
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
######################################################################
# Comparison of outcome of the three methods:
fig, (ax0, ax1) = plt.subplots(ncols=2, sharex=True, sharey=True)
ax0.set_title('filters.rank.median')
ax0.imshow(rc)
ax0.axis('off')
ax0.set_adjustable('box-forced')
ax1.set_title('scipy.ndimage.percentile')
ax1.imshow(rndi)
ax1.axis('off')
ax1.set_adjustable('box-forced')
######################################################################
# and increasing image size:
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
|
bsd-3-clause
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/matplotlib/testing/image_util.py
|
11
|
3765
|
# This module contains some functionality from the Python Imaging
# Library, that has been ported to use Numpy arrays rather than PIL
# Image objects.
# The Python Imaging Library is
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1995-2009 by Fredrik Lundh
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.cbook import deprecated, warn_deprecated
warn_deprecated('1.4.0', name='matplotlib.testing.image_util',
obj_type='module')
@deprecated('1.4.0')
def autocontrast(image, cutoff=0):
"""
Maximize image contrast, based on histogram. This completely
ignores the alpha channel.
"""
assert image.dtype == np.uint8
output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8)
for i in xrange(0, 3):
plane = image[:,:,i]
output_plane = output_image[:,:,i]
h = np.histogram(plane, bins=256)[0]
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in xrange(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in xrange(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in xrange(256):
if h[lo]:
break
for hi in xrange(255, -1, -1):
if h[hi]:
break
if hi <= lo:
output_plane[:,:] = plane
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
            lut = np.arange(256, dtype=float)
lut *= scale
lut += offset
lut = lut.clip(0, 255)
lut = lut.astype(np.uint8)
output_plane[:,:] = lut[plane]
return output_image
|
gpl-2.0
|
karec/oct
|
oct/results/report.py
|
2
|
4007
|
import six
import time
from collections import defaultdict
import ujson as json
import pandas as pd
from oct.results.models import db, Result, Turret
class ReportResults(object):
"""Represent a report containing all tests results
:param int run_time: the run_time of the script
:param int interval: the time interval between each group of results
"""
def __init__(self, run_time, interval):
self.total_transactions = 0
self.total_errors = Result.select(Result.id).where(Result.error != "", Result.error != None).count()
self.total_timers = 0
self.timers_results = {}
self._timers_values = defaultdict(list)
self.turrets = []
self.main_results = {}
self.interval = interval
self._init_turrets()
def _init_dates(self):
"""Initialize all dates properties
"""
if self.total_transactions == 0:
return None
self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))
def _init_dataframes(self):
"""Initialise the main dataframe for the results and the custom timers dataframes
"""
df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC",
db.get_conn())
self._get_all_timers(df)
self.main_results = self._get_processed_dataframe(df)
# create all custom timers dataframes
for key, value in six.iteritems(self._timers_values):
df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])
df.index = pd.to_datetime(df['epoch'], unit='s')
timer_results = self._get_processed_dataframe(df)
self.timers_results[key] = timer_results
# clear memory
del self._timers_values
def _get_all_timers(self, dataframe):
"""Get all timers and set them in the _timers_values property
:param pandas.DataFrame dataframe: the main dataframe with row results
"""
s = dataframe['custom_timers'].apply(json.loads)
s.index = dataframe['epoch']
for index, value in s.iteritems():
if not value:
continue
for key, value in six.iteritems(value):
self._timers_values[key].append((index, value))
self.total_timers += 1
del dataframe['custom_timers']
del s
def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
return {
"raw": dataframe.round(2),
"compiled": df_final.round(2),
"summary": summary.round(2)
}
def _init_turrets(self):
"""Setup data from database
"""
for turret in Turret.select():
self.turrets.append(turret.to_dict())
def compile_results(self):
"""Compile all results for the current test
"""
self._init_dataframes()
self.total_transactions = len(self.main_results['raw'])
self._init_dates()
|
mit
|
jgdwyer/nn-convection
|
sknn_jgd/nn.py
|
3
|
26071
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
__all__ = ['Regressor', 'Classifier', 'Layer', 'Convolution']
import os
import sys
import time
import logging
import itertools
import collections
log = logging.getLogger('sknn')
import numpy
import theano
class ansi:
BOLD = '\033[1;97m'
WHITE = '\033[0;97m'
YELLOW = '\033[0;33m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
BLUE = '\033[0;94m'
ENDC = '\033[0m'
class Layer(object):
"""
Specification for a layer to be passed to the neural network during construction. This
includes a variety of parameters to configure each layer based on its activation type.
Parameters
----------
type: str
Select which activation function this layer should use, as a string. Specifically,
options are ``Rectifier``, ``Sigmoid``, ``Tanh``, and ``ExpLin`` for non-linear layers
and ``Linear`` or ``Softmax`` for output layers.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
units: int
The number of units (also known as neurons) in this layer. This applies to all
layer types except for convolution.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
units=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'Softmax', 'Gaussian', 'ExpLin']:
raise NotImplementedError("Layer type `%s` is not implemented." % type)
self.name = name
self.type = type
self.units = units
self.weight_decay = weight_decay
self.dropout = dropout
self.normalize = normalize
self.frozen = frozen
def set_params(self, **params):
"""Setter for internal variables that's compatible with ``scikit-learn``.
"""
for k, v in params.items():
if k not in self.__dict__:
raise ValueError("Invalid parameter `%s` for layer `%s`." % (k, self.name))
self.__dict__[k] = v
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
copy = self.__dict__.copy()
del copy['type']
params = ", ".join(["%s=%r" % (k, v) for k, v in copy.items() if v is not None])
return "<sknn.nn.%s `%s`: %s>" % (self.__class__.__name__, self.type, params)
class Native(object):
"""Special type of layer that is handled directly to the backend (e.g. Lasagne). This
can be used to construct more advanced networks that are not yet supported by the
default interface.
Note that using this as a layer type means your code may not be compatible with future
revisions or other backends, and that serialization may be affected.
Parameters
----------
constructor: class or callable
The layer type usable directly by the backend (e.g. Lasagne). This can also
be a callable function that acts as a layer constructor.
*args: list of arguments
All positional arguments are passed directly to the constructor when the
neural network is initialized.
**kwargs: dictionary of named arguments
All named arguments are passed to the constructor directly also, with the exception
of the parameters ``name``, ``units``, ``frozen``, ``weight_decay``, ``normalize``
which take on the same role as in :class:`sknn.nn.Layer`.
"""
def __init__(self, constructor, *args, **keywords):
for attr in ['name', 'units', 'frozen', 'weight_decay', 'normalize']:
setattr(self, attr, keywords.pop(attr, None))
self.type = constructor
self.args = args
self.keywords = keywords
class Convolution(Layer):
"""
Specification for a convolution layer to be passed to the neural network in construction.
This includes a variety of convolution-specific parameters to configure each layer, as well
as activation-specific parameters.
Parameters
----------
type: str
Select which activation function this convolution layer should use, as a string.
For hidden layers, you can use the following convolution types ``Rectifier``,
``ExpLin``, ``Sigmoid``, ``Tanh`` or ``Linear``.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
channels: int
Number of output channels for the convolution layers. Each channel has its own
set of shared weights which are trained by applying the kernel over the image.
kernel_shape: tuple of ints
A two-dimensional tuple of integers corresponding to the shape of the kernel when
convolution is used. For example, this could be a square kernel `(3,3)` or a full
horizontal or vertical kernel on the input matrix, e.g. `(N,1)` or `(1,N)`.
kernel_stride: tuple of ints, optional
A two-dimensional tuple of integers that represents the steps taken by the kernel
through the input image. By default, this is set to `(1,1)` and can be
customized separately to pooling.
border_mode: str
        String indicating the way borders in the image should be processed, one of three options:
        * `valid` — Only pixels from input where the kernel fits within bounds are processed.
        * `full` — All pixels from input are processed, and the boundaries are zero-padded.
        * `same` — The output resolution is set to the exact same as the input.
        The size of the output depends on this mode: for `full` it is larger than the input,
        for `same` it is identical, and for `valid` (default) it is smaller or equal.
pool_shape: tuple of ints, optional
A two-dimensional tuple of integers corresponding to the pool size for downsampling.
This should be square, for example `(2,2)` to reduce the size by half, or `(4,4)` to make
the output a quarter of the original.
Pooling is applied after the convolution and calculation of its activation.
pool_type: str, optional
Type of the pooling to be used; can be either `max` or `mean`. If a `pool_shape` is
specified the default is to take the maximum value of all inputs that fall into this
pool. Otherwise, the default is None and no pooling is used for performance.
scale_factor: tuple of ints, optional
        A two-dimensional tuple of integers corresponding to the upscaling ratio. This should be
square, for example `(2,2)` to increase the size by double, or `(4,4)` to make the
output four times the original.
Upscaling is applied before the convolution and calculation of its activation.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
channels=None,
kernel_shape=None,
kernel_stride=None,
border_mode='valid',
pool_shape=None,
pool_type=None,
scale_factor=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'ExpLin']:
raise NotImplementedError("Convolution type `%s` is not implemented." % (type,))
if border_mode not in ['valid', 'full', 'same']:
raise NotImplementedError("Convolution border_mode `%s` is not implemented." % (border_mode,))
super(Convolution, self).__init__(
type,
name=name,
weight_decay=weight_decay,
dropout=dropout,
normalize=normalize,
frozen=frozen)
self.channels = channels
self.kernel_shape = kernel_shape
self.kernel_stride = kernel_stride or (1,1)
self.border_mode = border_mode
self.pool_shape = pool_shape or (1,1)
self.pool_type = pool_type or ('max' if pool_shape else None)
self.scale_factor = scale_factor or (1,1)
class NeuralNetwork(object):
"""
Abstract base class for wrapping all neural network functionality from PyLearn2,
common to multi-layer perceptrons in :mod:`sknn.mlp` and auto-encoders in
in :mod:`sknn.ae`.
Parameters
----------
layers: list of Layer
        An iterable sequence of layers, each specified as a :class:`sknn.mlp.Layer`
        instance that contains its type, optional name, and any parameters required.
* For hidden layers, you can use the following layer types:
``Rectifier``, ``ExpLin``, ``Sigmoid``, ``Tanh``, or ``Convolution``.
* For output layers, you can use the following layer types:
``Linear`` or ``Softmax``.
It's possible to mix and match any of the layer types, though most often
you should probably use hidden and output types as recommended here. Typically,
the last entry in this ``layers`` list should contain ``Linear`` for regression,
or ``Softmax`` for classification.
random_state: int, optional
Seed for the initialization of the neural network parameters (e.g.
weights and biases). This is fully deterministic.
parameters: list of tuple of array-like, optional
A list of ``(weights, biases)`` tuples to be reloaded for each layer, in the same
order as ``layers`` was specified. Useful for initializing with pre-trained
networks.
learning_rule: str, optional
Name of the learning rule used during stochastic gradient descent,
one of ``sgd``, ``momentum``, ``nesterov``, ``adadelta``, ``adagrad`` or
``rmsprop`` at the moment. The default is vanilla ``sgd``.
learning_rate: float, optional
Real number indicating the default/starting rate of adjustment for
the weights during gradient descent. Different learning rules may
take this into account differently. Default is ``0.01``.
learning_momentum: float, optional
Real number indicating the momentum factor to be used for the
learning rule 'momentum'. Default is ``0.9``.
batch_size: int, optional
Number of training samples to group together when performing stochastic
gradient descent (technically, a "minibatch"). By default each sample is
treated on its own, with ``batch_size=1``. Larger batches are usually faster.
n_iter: int, optional
The number of iterations of gradient descent to perform on the
neural network's weights when training with ``fit()``.
n_stable: int, optional
        Number of iterations after which training should return when the validation
error remains (near) constant. This is usually a sign that the data has been
fitted, or that optimization may have stalled. If no validation set is specified,
then stability is judged based on the training error. Default is ``10``.
f_stable: float, optional
Threshold under which the validation error change is assumed to be stable, to
be used in combination with `n_stable`. This is calculated as a relative ratio
of improvement, so if the results are only 0.1% better training is considered
stable. The training set is used as fallback if there's no validation set. Default
        is ``0.001``.
valid_set: tuple of array-like, optional
Validation set (X_v, y_v) to be used explicitly while training. Both
        arrays should have the same size for the first dimension, and the second
        dimension should match with the training data specified in ``fit()``.
valid_size: float, optional
Ratio of the training data to be used for validation. 0.0 means no
validation, and 1.0 would mean there's no training data! Common values are
0.1 or 0.25.
normalize: string, optional
Enable normalization for all layers. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
regularize: string, optional
Which regularization technique to use on the weights, for example ``L2`` (most
common) or ``L1`` (quite rare), as well as ``dropout``. By default, there's no
regularization, unless another parameter implies it should be enabled, e.g. if
``weight_decay`` or ``dropout_rate`` are specified.
weight_decay: float, optional
The coefficient used to multiply either ``L1`` or ``L2`` equations when computing
the weight decay for regularization. If ``regularize`` is specified, this defaults
to 0.0001.
dropout_rate: float, optional
What rate to use for drop-out training in the inputs (jittering) and the
hidden layers, for each training example. Specify this as a ratio of inputs
to be randomly excluded during training, e.g. 0.75 means only 25% of inputs
will be included in the training.
loss_type: string, optional
        The cost function to use when training the network. There are three valid options:
        * ``mse`` — Use mean squared error, for learning to predict the mean of the data.
        * ``mae`` — Use mean absolute error, for learning to predict the median of the data.
* ``mcc`` — Use mean categorical cross-entropy, particularly for classifiers.
The default option is ``mse`` for regressors and ``mcc`` for classifiers, but ``mae`` can
only be applied to layers of type ``Linear`` or ``Gaussian`` and they must be used as
the output layer (PyLearn2 only).
callback: callable or dict, optional
An observer mechanism that exposes information about the inner training loop. This is
either a single function that takes ``cbs(event, **variables)`` as a parameter, or a
dictionary of functions indexed by on `event` string that conforms to ``cb(**variables)``.
There are multiple events sent from the inner training loop:
* ``on_train_start`` — Called when the main training function is entered.
* ``on_epoch_start`` — Called the first thing when a new iteration starts.
* ``on_batch_start`` — Called before an individual batch is processed.
* ``on_batch_finish`` — Called after that individual batch is processed.
        * ``on_epoch_finish`` — Called last, when the iteration is done.
* ``on_train_finish`` — Called just before the training function exits.
For each function, the ``variables`` dictionary passed contains all local variables within
the training implementation.
debug: bool, optional
Should the underlying training algorithms perform validation on the data
as it's optimizing the model? This makes things slower, but errors can
be caught more effectively. Default is off.
verbose: bool, optional
How to initialize the logging to display the results during training. If there is
already a logger initialized, either ``sknn`` or the root logger, then this function
does nothing. Otherwise:
* ``False`` — Setup new logger that shows only warnings and errors.
* ``True`` — Setup a new logger that displays all debug messages.
* ``None`` — Don't setup a new logger under any condition (default).
Using the built-in python ``logging`` module, you can control the detail and style of
output by customising the verbosity level and formatter for ``sknn`` logger.
warning: None
You should use keyword arguments after `layers` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
layers,
warning=None,
parameters=None,
random_state=None,
learning_rule='sgd',
learning_rate=0.01,
learning_momentum=0.9,
normalize=None,
regularize=None,
weight_decay=None,
dropout_rate=None,
batch_size=1,
n_iter=None,
n_stable=10,
f_stable=0.001,
valid_set=None,
valid_size=0.0,
loss_type=None,
callback=None,
debug=False,
verbose=None,
**params):
assert warning is None,\
"Specify network parameters as keyword arguments, not positional arguments."
self.layers = []
for i, layer in enumerate(layers):
assert isinstance(layer, Layer) or isinstance(layer, Native),\
"Specify each layer as an instance of a `sknn.mlp.Layer` object."
# Layer names are optional, if not specified then generate one.
if layer.name is None:
layer.name = ("hidden%i" % i) if i < len(layers)-1 else "output"
# sklearn may pass layers in as additional named parameters, remove them.
if layer.name in params:
del params[layer.name]
self.layers.append(layer)
# Don't support any additional parameters that are not in the constructor.
# These are specified only so `get_params()` can return named layers, for double-
# underscore syntax to work.
assert len(params) == 0,\
"The specified additional parameters are unknown: %s." % ','.join(params.keys())
# Basic checking of the freeform string options.
assert regularize in (None, 'L1', 'L2', 'dropout'),\
"Unknown type of regularization specified: %s." % regularize
assert loss_type in ('mse', 'mae', 'mcc', None),\
"Unknown loss function type specified: %s." % loss_type
self.weights = parameters
self.random_state = random_state
self.learning_rule = learning_rule
self.learning_rate = learning_rate
self.learning_momentum = learning_momentum
self.normalize = normalize
self.regularize = regularize or ('dropout' if dropout_rate else None)\
or ('L2' if weight_decay else None)
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.n_stable = n_stable
self.f_stable = f_stable
self.valid_set = valid_set
self.valid_size = valid_size
self.loss_type = loss_type
self.debug = debug
self.verbose = verbose
self.callback = callback
self.auto_enabled = {}
self._backend = None
self._create_logger()
self._setup()
def _setup(self):
raise NotImplementedError("NeuralNetwork is an abstract class; "
"use the mlp.Classifier or mlp.Regressor instead.")
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return self._backend is not None and self._backend.is_initialized
def is_convolution(self, input=None, output=False):
"""Check whether this neural network includes convolution layers in the first
or last position.
Parameters
----------
input : boolean, optional
Whether the first layer should be checked for convolution. Default True.
output : boolean, optional
Whether the last layer should be checked for convolution. Default False.
Returns
-------
is_conv : boolean
True if either of the specified layers are indeed convolution, False otherwise.
"""
check_output = output
check_input = False if check_output and input is None else True
i = check_input and isinstance(self.layers[0], Convolution)
o = check_output and isinstance(self.layers[-1], Convolution)
return i or o
@property
def is_classifier(self):
"""Is this neural network instanced as a classifier or regressor?"""
return False
def _create_logger(self):
# If users have configured logging already, assume they know best.
if len(log.handlers) > 0 or len(log.parent.handlers) > 0 or self.verbose is None:
return
# Otherwise setup a default handler and formatter based on verbosity.
lvl = logging.DEBUG if self.verbose else logging.WARNING
fmt = logging.Formatter("%(message)s")
hnd = logging.StreamHandler(stream=sys.stdout)
hnd.setFormatter(fmt)
hnd.setLevel(lvl)
log.addHandler(hnd)
log.setLevel(lvl)
def get_parameters(self):
"""Extract the neural networks weights and biases layer by layer. Only valid
once the neural network has been initialized, for example via `fit()` function.
Returns
-------
params : list of tuples
For each layer in the order they are passed to the constructor, a named-tuple
of three items `weights`, `biases` (both numpy arrays) and `name` (string)
in that order.
"""
assert self._backend is not None,\
"Backend was not initialized; could not retrieve network parameters."
P = collections.namedtuple('Parameters', 'weights biases layer')
return [P(w, b, s.name) for s, (w, b) in zip(self.layers, self._backend._mlp_to_array())]
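    # A hedged usage sketch (assumes a network ``nn`` that has already been
    # trained with ``fit()``):
    #
    #     for p in nn.get_parameters():
    #         print(p.layer, p.weights.shape, p.biases.shape)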
def set_parameters(self, storage):
"""Store the given weighs and biases into the neural network. If the neural network
has not been initialized, use the `weights` list as construction parameter instead.
Otherwise if the neural network is initialized, this function will extract the parameters
from the input list or dictionary and store them accordingly.
Parameters
----------
storage : list of tuples, or dictionary of tuples
Either a list of tuples for each layer, storing two items `weights` and `biases` in
the exact same order as construction. Alternatively, if this is a dictionary, a string
to tuple mapping for each layer also storing `weights` and `biases` but not necessarily
for all layers.
"""
# In case the class is not initialized, store the parameters for later during _initialize.
if self._backend is None:
self.weights = storage
return
if isinstance(storage, dict):
layers = [storage.get(l.name, None) for l in self.layers]
else:
layers = storage
return self._backend._array_to_mlp(layers, self._backend.mlp)
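    # A hedged usage sketch: copy the parameters of a fitted network ``nn1``
    # into a second network ``nn2`` constructed with identically named layers.
    #
    #     params = nn1.get_parameters()
    #     nn2.set_parameters({p.layer: (p.weights, p.biases) for p in params})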
|
apache-2.0
|
daodaoliang/neural-network-animation
|
matplotlib/textpath.py
|
11
|
16650
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import warnings
if six.PY3:
from urllib.parse import quote as urllib_quote
else:
from urllib import quote as urllib_quote
import numpy as np
from matplotlib.path import Path
from matplotlib import rcParams
import matplotlib.font_manager as font_manager
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ft2font import LOAD_TARGET_LIGHT
from matplotlib.mathtext import MathTextParser
import matplotlib.dviread as dviread
from matplotlib.font_manager import FontProperties
from matplotlib.transforms import Affine2D
class TextToPath(object):
"""
A class that converts a given text to a path using ttf fonts.
"""
FONT_SCALE = 100.
DPI = 72
def __init__(self):
"""
Initialization
"""
self.mathtext_parser = MathTextParser('path')
self.tex_font_map = None
from matplotlib.cbook import maxdict
self._ps_fontd = maxdict(50)
self._texmanager = None
self._adobe_standard_encoding = None
def _get_adobe_standard_encoding(self):
enc_name = dviread.find_tex_file('8a.enc')
enc = dviread.Encoding(enc_name)
return dict([(c, i) for i, c in enumerate(enc.encoding)])
def _get_font(self, prop):
"""
find a ttf font.
"""
fname = font_manager.findfont(prop)
font = FT2Font(fname)
font.set_size(self.FONT_SCALE, self.DPI)
return font
def _get_hinting_flag(self):
return LOAD_NO_HINTING
def _get_char_id(self, font, ccode):
"""
Return a unique id for the given font and character-code set.
"""
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
char_id = urllib_quote('%s-%x' % (ps_name, ccode))
return char_id
def _get_char_id_ps(self, font, ccode):
"""
Return a unique id for the given font and character-code set (for tex).
"""
ps_name = font.get_ps_font_info()[2]
char_id = urllib_quote('%s-%d' % (ps_name, ccode))
return char_id
def glyph_to_path(self, font, currx=0.):
"""
convert the ft2font glyph to vertices and codes.
"""
verts, codes = font.get_path()
if currx != 0.0:
verts[:, 0] += currx
return verts, codes
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=None)
return w, h, d
fontsize = prop.get_size_in_points()
scale = float(fontsize) / self.FONT_SCALE
if ismath:
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, trash, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width * scale, height * scale, descent * scale
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w * scale, h * scale, d * scale
def get_text_path(self, prop, s, ismath=False, usetex=False):
"""
convert text *s* to path (a tuple of vertices and codes for
matplotlib.path.Path).
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. Effective only if usetex == False.
"""
if not usetex:
if not ismath:
font = self._get_font(prop)
glyph_info, glyph_map, rects = self.get_glyphs_with_font(
font, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_mathtext(
prop, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
verts, codes = [], []
for glyph_id, xposition, yposition, scale in glyph_info:
verts1, codes1 = glyph_map[glyph_id]
if len(verts1):
verts1 = np.array(verts1) * scale + [xposition, yposition]
verts.extend(verts1)
codes.extend(codes1)
for verts1, codes1 in rects:
verts.extend(verts1)
codes.extend(codes1)
return verts, codes
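    # A hedged usage sketch of get_text_path (relies on the module-level
    # ``text_to_path`` instance created further below):
    #
    #     verts, codes = text_to_path.get_text_path(FontProperties(), "abc")
    #     path = Path(verts, codes)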
def get_glyphs_with_font(self, font, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using the
provided ttf font.
"""
# Mostly copied from backend_svg.py.
cmap = font.get_charmap()
lastgind = None
currx = 0
xpositions = []
glyph_ids = []
if glyph_map is None:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
# I'm not sure if I get kernings right. Needs to be verified. -JJL
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
gind = 0
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
horiz_advance = (glyph.linearHoriAdvance / 65536.0)
char_id = self._get_char_id(font, ccode)
if char_id not in glyph_map:
glyph_map_new[char_id] = self.glyph_to_path(font)
currx += (kern / 64.0)
xpositions.append(currx)
glyph_ids.append(char_id)
currx += horiz_advance
lastgind = gind
ypositions = [0] * len(xpositions)
sizes = [1.] * len(xpositions)
rects = []
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, rects)
def get_glyphs_mathtext(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes by parsing it with
mathtext.
"""
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.DPI, prop)
if not glyph_map:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
xpositions = []
ypositions = []
glyph_ids = []
sizes = []
currx, curry = 0, 0
for font, fontsize, ccode, ox, oy in glyphs:
char_id = self._get_char_id(font, ccode)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
glyph_map_new[char_id] = self.glyph_to_path(font)
xpositions.append(ox)
ypositions.append(oy)
glyph_ids.append(char_id)
size = fontsize / self.FONT_SCALE
sizes.append(size)
myrects = []
for ox, oy, w, h in rects:
vert1 = [(ox, oy), (ox, oy + h), (ox + w, oy + h),
(ox + w, oy), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_glyphs_tex(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using matplotlib's usetex
mode.
"""
# codes are mostly borrowed from the pdf backend.
texmanager = self.get_texmanager()
if self.tex_font_map is None:
self.tex_font_map = dviread.PsfontsMap(
dviread.find_tex_file('pdftex.map'))
if self._adobe_standard_encoding is None:
self._adobe_standard_encoding = self._get_adobe_standard_encoding()
fontsize = prop.get_size_in_points()
if hasattr(texmanager, "get_dvi"):
dvifilelike = texmanager.get_dvi(s, self.FONT_SCALE)
dvi = dviread.DviFromFileLike(dvifilelike, self.DPI)
else:
dvifile = texmanager.make_dvi(s, self.FONT_SCALE)
dvi = dviread.Dvi(dvifile, self.DPI)
try:
page = next(iter(dvi))
finally:
dvi.close()
if glyph_map is None:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
glyph_ids, xpositions, ypositions, sizes = [], [], [], []
# Gather font information and do some setup for combining
# characters into strings.
# oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
font_and_encoding = self._ps_fontd.get(dvifont.texname)
font_bunch = self.tex_font_map[dvifont.texname]
if font_and_encoding is None:
font = FT2Font(font_bunch.filename)
for charmap_name, charmap_code in [("ADOBE_CUSTOM",
1094992451),
("ADOBE_STANDARD",
1094995778)]:
try:
font.select_charmap(charmap_code)
except ValueError:
pass
else:
break
else:
charmap_name = ""
warnings.warn("No supported encoding in font (%s)." %
font_bunch.filename)
if charmap_name == "ADOBE_STANDARD" and font_bunch.encoding:
enc0 = dviread.Encoding(font_bunch.encoding)
enc = dict([(i, self._adobe_standard_encoding.get(c, None))
for i, c in enumerate(enc0.encoding)])
else:
enc = dict()
self._ps_fontd[dvifont.texname] = font, enc
else:
font, enc = font_and_encoding
ft2font_flag = LOAD_TARGET_LIGHT
char_id = self._get_char_id_ps(font, glyph)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
if enc:
charcode = enc.get(glyph, None)
else:
charcode = glyph
if charcode is not None:
glyph0 = font.load_char(charcode, flags=ft2font_flag)
else:
warnings.warn("The glyph (%d) of font (%s) cannot be "
"converted with the encoding. Glyph may "
"be wrong" % (glyph, font_bunch.filename))
glyph0 = font.load_char(glyph, flags=ft2font_flag)
glyph_map_new[char_id] = self.glyph_to_path(font)
glyph_ids.append(char_id)
xpositions.append(x1)
ypositions.append(y1)
sizes.append(dvifont.size / self.FONT_SCALE)
myrects = []
for ox, oy, h, w in page.boxes:
vert1 = [(ox, oy), (ox + w, oy), (ox + w, oy + h),
(ox, oy + h), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
text_to_path = TextToPath()
class TextPath(Path):
"""
Create a path from the text.
"""
def __init__(self, xy, s, size=None, prop=None,
_interpolation_steps=1, usetex=False,
*kl, **kwargs):
"""
Create a path from the text. No support for TeX yet. Note that
it simply is a path, not an artist. You need to use the
PathPatch (or other artists) to draw this path onto the
canvas.
xy : position of the text.
s : text
size : font size
prop : font property
"""
if prop is None:
prop = FontProperties()
if size is None:
size = prop.get_size_in_points()
self._xy = xy
self.set_size(size)
self._cached_vertices = None
self._vertices, self._codes = self.text_get_vertices_codes(
prop, s,
usetex=usetex)
self._should_simplify = False
self._simplify_threshold = rcParams['path.simplify_threshold']
self._has_nonfinite = False
self._interpolation_steps = _interpolation_steps
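    # A hedged usage sketch: a TextPath is only a Path, so wrap it in a
    # PathPatch (from matplotlib.patches, imported by the caller) to draw it.
    #
    #     tp = TextPath((0, 0), "Hello", size=20, prop=FontProperties())
    #     patch = PathPatch(tp, facecolor='0.3', lw=0)
    #     ax.add_patch(patch)   # ``ax`` is an existing Axes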
def set_size(self, size):
"""
set the size of the text
"""
self._size = size
self._invalid = True
def get_size(self):
"""
get the size of the text
"""
return self._size
def _get_vertices(self):
"""
Return the cached path after updating it if necessary.
"""
self._revalidate_path()
return self._cached_vertices
def _get_codes(self):
"""
Return the codes
"""
return self._codes
vertices = property(_get_vertices)
codes = property(_get_codes)
def _revalidate_path(self):
"""
update the path if necessary.
The path for the text is initially created with the font size
of FONT_SCALE, and this path is rescaled to other sizes when
necessary.
"""
if (self._invalid or
(self._cached_vertices is None)):
tr = Affine2D().scale(
self._size / text_to_path.FONT_SCALE,
self._size / text_to_path.FONT_SCALE).translate(*self._xy)
self._cached_vertices = tr.transform(self._vertices)
self._invalid = False
def is_math_text(self, s):
"""
Return the (possibly cleaned) string *s* together with a flag: 'TeX' when
usetex is active, True if *s* contains mathtext, and False otherwise.
"""
# copied from Text.is_math_text -JJL
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
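    # Illustrative behaviour (a sketch, assuming rcParams['text.usetex'] is off):
    #   self.is_math_text(r"$\alpha > \beta$")  ->  (r"$\alpha > \beta$", True)
    #   self.is_math_text(r"cost is \$5")       ->  ("cost is $5", False)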
def text_get_vertices_codes(self, prop, s, usetex):
"""
convert the string *s* to vertices and codes using the
provided font property *prop*. Mostly copied from
backend_svg.py.
"""
if usetex:
verts, codes = text_to_path.get_text_path(prop, s, usetex=True)
else:
clean_line, ismath = self.is_math_text(s)
verts, codes = text_to_path.get_text_path(prop, clean_line,
ismath=ismath)
return verts, codes
|
mit
|
gtesei/fast-furious
|
competitions/tgs-salt-identification-challenge/unet_start_2_correct_IoU.py
|
1
|
8619
|
import os
import sys
import random
import warnings
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# Define IoU metric
def castF(x):
return K.cast(x, K.floatx())
def castB(x):
return K.cast(x, bool)
def iou_loss_core(true,pred):
intersection = true * pred
notTrue = 1 - true
union = true + (notTrue * pred)
return (K.sum(intersection, axis=-1) + K.epsilon()) / (K.sum(union, axis=-1) + K.epsilon())
def competitionMetric2(true, pred): #any shape can go
tresholds = [0.5 + (i*.05) for i in range(10)]
#flattened images (batch, pixels)
true = K.batch_flatten(true)
pred = K.batch_flatten(pred)
pred = castF(K.greater(pred, 0.5))
#total white pixels - (batch,)
trueSum = K.sum(true, axis=-1)
predSum = K.sum(pred, axis=-1)
#has mask or not per image - (batch,)
true1 = castF(K.greater(trueSum, 1))
pred1 = castF(K.greater(predSum, 1))
#to get images that have mask in both true and pred
truePositiveMask = castB(true1 * pred1)
#separating only the possible true positives to check iou
testTrue = tf.boolean_mask(true, truePositiveMask)
testPred = tf.boolean_mask(pred, truePositiveMask)
#getting iou and threshold comparisons
iou = iou_loss_core(testTrue,testPred)
truePositives = [castF(K.greater(iou, tres)) for tres in tresholds]
#mean of thressholds for true positives and total sum
truePositives = K.mean(K.stack(truePositives, axis=-1), axis=-1)
truePositives = K.sum(truePositives)
#to get images that don't have mask in both true and pred
trueNegatives = (1-true1) * (1 - pred1) # = 1 -true1 - pred1 + true1*pred1
trueNegatives = K.sum(trueNegatives)
return (truePositives + trueNegatives) / castF(K.shape(true)[0])
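# A hedged sanity check (needs an active Keras/TensorFlow session, so it is
# left commented out): perfect predictions on an all-foreground batch score 1.0.
#
#   y = np.ones((2, 16, 16, 1), dtype=np.float32)
#   K.eval(competitionMetric2(K.constant(y), K.constant(y)))   # -> 1.0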
####################################################### START
start_time = time.time()
# Set some parameters
im_width = 128
im_height = 128
im_chan = 1
path_train = 'data/train/'
path_test = 'data/test/'
train_ids = next(os.walk(path_train+"images"))[2]
test_ids = next(os.walk(path_test+"images"))[2]
# Get and resize train images and masks
X_train = np.zeros((len(train_ids), im_height, im_width, im_chan), dtype=np.uint8)
Y_train = np.zeros((len(train_ids), im_height, im_width, 1), dtype=np.bool)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
path = path_train
img = load_img(path + '/images/' + id_)
x = img_to_array(img)[:,:,1]
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_train[n] = x
mask = img_to_array(load_img(path + '/masks/' + id_))[:,:,1]
Y_train[n] = resize(mask, (128, 128, 1), mode='constant', preserve_range=True)
print('Done!')
# Build U-Net model
inputs = Input((im_height, im_width, im_chan))
s = Lambda(lambda x: x / 255) (inputs)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (s)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same') (p4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same') (c5)
u6 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(64, (3, 3), activation='relu', padding='same') (u6)
c6 = Conv2D(64, (3, 3), activation='relu', padding='same') (c6)
u7 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(32, (3, 3), activation='relu', padding='same') (u7)
c7 = Conv2D(32, (3, 3), activation='relu', padding='same') (c7)
u8 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(16, (3, 3), activation='relu', padding='same') (u8)
c8 = Conv2D(16, (3, 3), activation='relu', padding='same') (c8)
u9 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(8, (3, 3), activation='relu', padding='same') (u9)
c9 = Conv2D(8, (3, 3), activation='relu', padding='same') (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[competitionMetric2])
model.summary()
earlystopper = EarlyStopping(patience=20, verbose=1,monitor='val_competitionMetric2',mode='max')
checkpointer = ModelCheckpoint('model-tgs-salt-1.h5', verbose=1, save_best_only=True,monitor='val_competitionMetric2',mode='max')
reduce_lr = ReduceLROnPlateau(factor=0.2, patience=5, min_lr=0.00001, verbose=1,monitor='val_competitionMetric2',mode='max')
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=32, epochs=200, callbacks=[earlystopper, checkpointer,reduce_lr])
# Get and resize test images
X_test = np.zeros((len(test_ids), im_height, im_width, im_chan), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
path = path_test
img = load_img(path + '/images/' + id_)
x = img_to_array(img)[:,:,1]
sizes_test.append([x.shape[0], x.shape[1]])
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_test[n] = x
print('Done!')
# Predict on train, val and test
model = load_model('model-tgs-salt-1.h5' , custom_objects={'competitionMetric2': competitionMetric2 , 'iou_loss_core': iou_loss_core , 'castB': castB , 'castF': castF})
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in tnrange(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]), (sizes_test[i][0], sizes_test[i][1]), mode='constant', preserve_range=True))
preds_test_upsampled[0].shape
def RLenc(img, order='F', format=True):
bytes = img.reshape(img.shape[0] * img.shape[1], order=order)
runs = [] ## list of run lengths
r = 0 ## the current run length
pos = 1 ## count starts from 1 per WK
for c in bytes:
if (c == 0):
if r != 0:
runs.append((pos, r))
pos += r
r = 0
pos += 1
else:
r += 1
# if last run is unsaved (i.e. data ends with 1)
if r != 0:
runs.append((pos, r))
pos += r
r = 0
if format:
z = ''
for rr in runs:
z += '{} {} '.format(rr[0], rr[1])
return z[:-1]
else:
return runs
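# A hedged sanity check for the run-length encoder above: in column-major
# ('F') order, a 2x2 mask whose second column is set flattens to
# [0, 0, 1, 1], i.e. a single run starting at pixel 3 with length 2.
#
#   RLenc(np.array([[0, 1], [0, 1]]))   # -> '3 2'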
pred_dict = {fn[:-4]:RLenc(np.round(preds_test_upsampled[i])) for i,fn in tqdm(enumerate(test_ids))}
sub = pd.DataFrame.from_dict(pred_dict,orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv('submission.csv')
###
seconds = time.time() - start_time
mins = seconds / 60
hours = mins / 60
days = hours / 24
print("------>>>>>>> elapsed seconds:", seconds)
print("------>>>>>>> elapsed minutes:", mins)
print("------>>>>>>> elapsed hours:", hours)
print("------>>>>>>> elapsed days:", days)
|
mit
|
shujaatak/UAV_MissionPlanner
|
Lib/site-packages/numpy/doc/creation.py
|
94
|
5411
|
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple, lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. For example: ::
 >>> np.zeros((2, 3))
 array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
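For example: ::
 >>> np.ones((2, 3))
 array([[ 1., 1., 1.], [ 1., 1., 1.]])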
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly, but for which conversion is
not hard, are the many image formats (jpg, png, etc.) that can be read and
written with libraries like PIL.
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
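For instance, a minimal sketch using numpy's own reader on delimited text: ::
 >>> from io import StringIO
 >>> np.loadtxt(StringIO("1,2,3\n4,5,6"), delimiter=",")
 array([[ 1., 2., 3.], [ 4., 5., 6.]])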
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
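A minimal round-trip sketch (writes a small file in the current directory): ::
 >>> a = np.arange(4, dtype=np.float64)
 >>> a.tofile('data.bin')
 >>> np.fromfile('data.bin', dtype=np.float64)
 array([ 0., 1., 2., 3.])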
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common case is the use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
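For example: ::
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
 >>> np.random.randint(0, 10, (2, 3)) # random integers; output varies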
"""
|
gpl-2.0
|
nipunreddevil/bayespy
|
bayespy/inference/vmp/nodes/GaussianProcesses.py
|
2
|
26795
|
######################################################################
# Copyright (C) 2011,2012 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import itertools
import numpy as np
#import scipy as sp
#import scipy.linalg.decomp_cholesky as decomp
import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
#import scipy.spatial.distance as distance
import scipy.sparse as sp
import utils
import Nodes.ExponentialFamily as EF
import Nodes.CovarianceFunctions as CF
import imp
imp.reload(utils)
imp.reload(EF)
imp.reload(CF)
class CovarianceMatrix:
def cholesky(self):
pass
def multiply(A, B):
return np.multiply(A,B)
# m prior mean function
# k prior covariance function
# x data inputs
# z processed data outputs (z = inv(Cov) * (y-m(x)))
# U data covariance Cholesky factor
def gp_posterior_moment_function(m, k, x, y, k_sparse=None, pseudoinputs=None, noise=None):
# Prior
# FIXME: We are ignoring the covariance of mu now..
mu = m(x)[0]
## if np.ndim(mu) == 1:
## mu = np.asmatrix(mu).T
## else:
## mu = np.asmatrix(mu)
K_noise = None
if noise != None:
if K_noise is None:
K_noise = noise
else:
K_noise += noise
if k_sparse != None:
if K_noise is None:
K_noise = k_sparse(x,x)[0]
else:
K_noise += k_sparse(x,x)[0]
if pseudoinputs != None:
p = pseudoinputs
#print('in pseudostuff')
#print(K_noise)
#print(np.shape(K_noise))
K_pp = k(p,p)[0]
K_xp = k(x,p)[0]
U = utils.chol(K_noise)
# Compute Lambda
Lambda = K_pp + np.dot(K_xp.T, utils.chol_solve(U, K_xp))
U_lambda = utils.chol(Lambda)
# Compute statistics for posterior predictions
#print(np.shape(U_lambda))
#print(np.shape(y))
z = utils.chol_solve(U_lambda,
np.dot(K_xp.T,
utils.chol_solve(U,
y - mu)))
U = utils.chol(K_pp)
# Now we can forget the location of the observations and
# consider only the pseudoinputs when predicting.
x = p
else:
K = K_noise
if K is None:
K = k(x,x)[0]
else:
try:
K += k(x,x)[0]
except:
K = K + k(x,x)[0]
# Compute posterior GP
N = len(y)
U = None
z = None
if N > 0:
U = utils.chol(K)
z = utils.chol_solve(U, y-mu)
def get_moments(h, covariance=1, mean=True):
K_xh = k(x, h)[0]
if k_sparse != None:
try:
# This may not work, for instance, if either one is a
# sparse matrix.
K_xh += k_sparse(x, h)[0]
except:
K_xh = K_xh + k_sparse(x, h)[0]
# NumPy has problems when mixing matrices and arrays.
# Matrices may appear, for instance, when you sum an array and
# a sparse matrix. Make sure the result is either an array or
# a sparse matrix (not dense matrix!), because matrix objects
# cause lots of problems:
#
# array.dot(array) = array
# matrix.dot(array) = matrix
# sparse.dot(array) = array
if not sp.issparse(K_xh):
K_xh = np.asarray(K_xh)
# Function for computing posterior moments
if mean:
# Mean vector
# FIXME: Ignoring the covariance of prior mu
m_h = m(h)[0]
if z != None:
m_h += K_xh.T.dot(z)
else:
m_h = None
# Compute (co)variance matrix/vector
if covariance:
if covariance == 1:
## Compute variance vector
k_h = k(h)[0]
if k_sparse != None:
k_h += k_sparse(h)[0]
if U != None:
if isinstance(K_xh, np.ndarray):
k_h -= np.einsum('i...,i...',
K_xh,
utils.chol_solve(U, K_xh))
else:
# TODO: This isn't very efficient way, but
# einsum doesn't work for sparse matrices..
# This may consume A LOT of memory for sparse
# matrices.
k_h -= np.asarray(K_xh.multiply(utils.chol_solve(U, K_xh))).sum(axis=0)
if pseudoinputs != None:
if isinstance(K_xh, np.ndarray):
k_h += np.einsum('i...,i...',
K_xh,
utils.chol_solve(U_lambda, K_xh))
else:
# TODO: This isn't very efficient way, but
# einsum doesn't work for sparse matrices..
# This may consume A LOT of memory for sparse
# matrices.
k_h += np.asarray(K_xh.multiply(utils.chol_solve(U_lambda, K_xh))).sum(axis=0)
# Ensure non-negative variances
k_h[k_h<0] = 0
return (m_h, k_h)
elif covariance == 2:
## Compute full covariance matrix
K_hh = k(h,h)[0]
if k_sparse != None:
K_hh += k_sparse(h)[0]
if U != None:
K_hh -= K_xh.T.dot(utils.chol_solve(U,K_xh))
#K_hh -= np.dot(K_xh.T, utils.chol_solve(U,K_xh))
if pseudoinputs != None:
K_hh += K_xh.T.dot(utils.chol_solve(U_lambda, K_xh))
#K_hh += np.dot(K_xh.T, utils.chol_solve(U_lambda, K_xh))
return (m_h, K_hh)
else:
return (m_h, None)
return get_moments
# Constant function using GP mean protocol
class Constant(EF.Node):
def __init__(self, f, **kwargs):
self.f = f
EF.Node.__init__(self, dims=[(np.inf,)], **kwargs)
def message_to_child(self, gradient=False):
# Wrapper
def func(x, gradient=False):
if gradient:
return ([self.f(x), None], [])
else:
return [self.f(x), None]
return func
#class MultiDimensional(EF.NodeVariable):
# """ A multi-dimensional Gaussian process f(x). """
## class ToGaussian(EF.NodeVariable):
## """ Deterministic node which transform a Gaussian process into
## finite-dimensional Gaussian variable. """
## def __init__(self, f, x, **kwargs):
## EF.NodeVariable.__init__(self,
## f,
## x,
## plates=
## dims=
# Deterministic node for creating a set of GPs which can be used as a
# mean function to a general GP node.
class Multiple(EF.NodeVariable):
def __init__(self, GPs, **kwargs):
# Ignore plates
EF.NodeVariable.__init__(self,
*GPs,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def message_to_parent(self, index):
raise Exception("not implemented yet")
def message_to_child(self, gradient=False):
u = [parent.message_to_child() for parent in self.parents]
def get_moments(xh, **kwargs):
mh_all = []
khh_all = []
for i in range(len(self.parents)):
xi = np.array(xh[i])
#print(xi)
#print(np.shape(xi))
#print(xi)
# FIXME: We are ignoring the covariance of mu now..
if gradient:
((mh, khh), dm) = u[i](xi, **kwargs)
else:
(mh, khh) = u[i](xi, **kwargs)
#mh = u[i](xi, **kwargs)[0]
#print(mh)
#print(mh_all)
## print(mh)
## print(khh)
## print(np.shape(mh))
mh_all = np.concatenate([mh_all, mh])
#print(np.shape(mh_all))
if khh != None:
print(khh)
raise Exception('Not implemented yet for covariances')
#khh_all = np.concatenate([khh_all, khh])
# FIXME: Compute gradients!
if gradient:
return ([mh_all, khh_all], [])
else:
return [mh_all, khh_all]
#return [mh_all, khh_all]
return get_moments
# Gaussian process distribution
class GaussianProcess(EF.NodeVariable):
def __init__(self, m, k, k_sparse=None, pseudoinputs=None, **kwargs):
self.x = np.array([])
self.f = np.array([])
## self.x_obs = np.zeros((0,1))
## self.f_obs = np.zeros((0,))
if pseudoinputs != None:
pseudoinputs = EF.NodeConstant([pseudoinputs],
dims=[np.shape(pseudoinputs)])
# By default, posterior == prior
self.m = None #m
self.k = None #k
if isinstance(k, list) and isinstance(m, list):
if len(k) != len(m):
raise Exception('The number of mean and covariance functions must be equal.')
k = CF.Multiple(k)
m = Multiple(m)
elif isinstance(k, list):
D = len(k)
k = CF.Multiple(k)
m = Multiple(D*[m])
elif isinstance(m, list):
D = len(m)
k = CF.Multiple(D*[k])
m = Multiple(m)
# Ignore plates
EF.NodeVariable.__init__(self,
m,
k,
k_sparse,
pseudoinputs,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def __call__(self, x, covariance=None):
if not covariance:
return self.u(x, covariance=False)[0]
elif covariance.lower() == 'vector':
return self.u(x, covariance=1)
elif covariance.lower() == 'matrix':
return self.u(x, covariance=2)
else:
raise Exception("Unknown covariance type requested")
def message_to_parent(self, index):
if index == 0:
k = self.parents[1].message_to_child()[0]
K = k(self.x, self.x)
return [self.x,
self.mu,
K]
if index == 1:
raise Exception("not implemented yet")
def message_to_child(self):
if self.observed:
raise Exception("Observable GP should not have children.")
return self.u
def get_parameters(self):
return self.u
def observe(self, x, f):
self.observed = True
self.x = x
self.f = f
## if np.ndim(f) == 1:
## self.f = np.asmatrix(f).T
## else:
## self.f = np.asmatrix(f)
# You might want:
# - mean for x
# - covariance (and mean) for x
# - variance (and mean) for x
# - i.e., mean and/or (co)variance for x
# - covariance for x1 and x2
def lower_bound_contribution(self, gradient=False):
# Get moment functions from parents
m = self.parents[0].message_to_child(gradient=gradient)
k = self.parents[1].message_to_child(gradient=gradient)
if self.parents[2]:
k_sparse = self.parents[2].message_to_child(gradient=gradient)
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child(gradient=gradient)
#pseudoinputs = self.parents[3].message_to_child(gradient=gradient)[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child(gradient=gradient)[0]
## k = self.parents[1].message_to_child(gradient=gradient)[0]
# Compute the parameters (covariance matrices etc) using
# parents' moment functions
DKs_xx = []
DKd_xx = []
DKd_xp = []
DKd_pp = []
Dxp = []
Dmu = []
if gradient:
# FIXME: We are ignoring the covariance of mu now..
((mu, _), Dmu) = m(self.x, gradient=True)
## if k_sparse:
## ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
if pseudoinputs:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
((xp,), Dxp) = pseudoinputs
((Kd_pp,), DKd_pp) = k(xp,xp, gradient=True)
((Kd_xp,), DKd_xp) = k(self.x, xp, gradient=True)
else:
((K_xx,), DKd_xx) = k(self.x, self.x, gradient=True)
if k_sparse:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
else:
# FIXME: We are ignoring the covariance of mu now..
(mu, _) = m(self.x)
## if k_sparse:
## (Ks_xx,) = k_sparse(self.x, self.x)
if pseudoinputs:
(Ks_xx,) = k_sparse(self.x, self.x)
(xp,) = pseudoinputs
(Kd_pp,) = k(xp, xp)
(Kd_xp,) = k(self.x, xp)
else:
(K_xx,) = k(self.x, self.x)
if k_sparse:
(Ks_xx,) = k_sparse(self.x, self.x)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
mu = mu[0]
#K = K[0]
# Log pdf
if self.observed:
## Log pdf for directly observed GP
f0 = self.f - mu
#print('hereiam')
#print(K)
if pseudoinputs:
## Pseudo-input approximation
# Decompose the full-rank sparse/noise covariance matrix
try:
Us_xx = utils.cholesky(Ks_xx)
except linalg.LinAlgError:
print('Noise/sparse covariance not positive definite')
return -np.inf
# Use Woodbury-Sherman-Morrison formula with the
# following notation:
#
# y2 = f0' * inv(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx) * f0
#
# z = Ks_xx \ f0
# Lambda = Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp
# nu = inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
# rho = Kd_xp * inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
#
# y2 = f0' * z - z' * rho
z = Us_xx.solve(f0)
Lambda = Kd_pp + np.dot(Kd_xp.T,
Us_xx.solve(Kd_xp))
## z = utils.chol_solve(Us_xx, f0)
## Lambda = Kd_pp + np.dot(Kd_xp.T,
## utils.chol_solve(Us_xx, Kd_xp))
try:
U_Lambda = utils.cholesky(Lambda)
#U_Lambda = utils.chol(Lambda)
except linalg.LinAlgError:
print('Lambda not positive definite')
return -np.inf
nu = U_Lambda.solve(np.dot(Kd_xp.T, z))
#nu = utils.chol_solve(U_Lambda, np.dot(Kd_xp.T, z))
rho = np.dot(Kd_xp, nu)
y2 = np.dot(f0, z) - np.dot(z, rho)
# Use matrix determinant lemma
#
# det(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx)
# = det(Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp)
# * det(inv(Kd_pp)) * det(Ks_xx)
# = det(Lambda) * det(Ks_xx) / det(Kd_pp)
try:
Ud_pp = utils.cholesky(Kd_pp)
#Ud_pp = utils.chol(Kd_pp)
except linalg.LinAlgError:
print('Covariance of pseudo inputs not positive definite')
return -np.inf
logdet = (U_Lambda.logdet()
+ Us_xx.logdet()
- Ud_pp.logdet())
## logdet = (utils.logdet_chol(U_Lambda)
## + utils.logdet_chol(Us_xx)
## - utils.logdet_chol(Ud_pp))
# Compute the log pdf
L = gaussian_logpdf(y2,
0,
0,
logdet,
np.size(self.f))
# Add the variational cost of the pseudo-input
# approximation
# Compute gradients
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = np.nan
# Send the derivative message
func(d)
for (dKs_xx, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
for (dKd_xp, func) in DKd_xp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
V = Ud_pp.solve(Kd_xp.T)
Z = Us_xx.solve(V.T)
## V = utils.chol_solve(Ud_pp, Kd_xp.T)
## Z = utils.chol_solve(Us_xx, V.T)
for (dKd_pp, func) in DKd_pp:
# Compute derivative w.r.t. covariance matrix
d = (0.5 * np.trace(Ud_pp.solve(dKd_pp))
- 0.5 * np.trace(U_Lambda.solve(dKd_pp))
+ np.dot(nu, np.dot(dKd_pp, nu))
+ np.trace(np.dot(dKd_pp,
np.dot(V,Z))))
## d = (0.5 * np.trace(utils.chol_solve(Ud_pp, dKd_pp))
## - 0.5 * np.trace(utils.chol_solve(U_Lambda, dKd_pp))
## + np.dot(nu, np.dot(dKd_pp, nu))
## + np.trace(np.dot(dKd_pp,
## np.dot(V,Z))))
# Send the derivative message
func(d)
for (dxp, func) in Dxp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
else:
## Full exact (no pseudo approximations)
try:
U = utils.cholesky(K_xx)
#U = utils.chol(K_xx)
except linalg.LinAlgError:
print('non positive definite, return -inf')
return -np.inf
z = U.solve(f0)
#z = utils.chol_solve(U, f0)
#print(K)
L = utils.gaussian_logpdf(np.dot(f0, z),
0,
0,
U.logdet(),
## utils.logdet_chol(U),
np.size(self.f))
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = -np.sum(z)
# Send the derivative message
func(d)
for (dK, func) in DKd_xx:
# Compute derivative w.r.t. covariance matrix
#
# TODO: trace+chol_solve should be handled better
# for sparse matrices. Use sparse-inverse!
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
#print('derivate', d, dK)
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
#
# Send the derivative message
func(d)
for (dK, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
# Send the derivative message
func(d)
else:
## Log pdf for latent GP
raise Exception('Not implemented yet')
return L
## Let f1 be observed and f2 latent function values.
# Compute <log p(f1,f2|m,k)>
#L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
# Compute <log q(f2)>
def update(self):
# Messages from parents
m = self.parents[0].message_to_child()
k = self.parents[1].message_to_child()
if self.parents[2]:
k_sparse = self.parents[2].message_to_child()
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child()[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child()[0]
## k = self.parents[1].message_to_child()[0]
if self.observed:
# Observations of this node
self.u = gp_posterior_moment_function(m,
k,
self.x,
self.f,
k_sparse=k_sparse,
pseudoinputs=pseudoinputs)
else:
x = np.array([])
y = np.array([])
# Messages from children
for (child,index) in self.children:
(msg, mask) = child.message_to_parent(index)
# Ignoring masks and plates..
# m[0] is the inputs
x = np.concatenate((x, msg[0]), axis=-2)
# m[1] is the observations
y = np.concatenate((y, msg[1]))
# m[2] is the covariance matrix
V = linalg.block_diag(V, msg[2])
self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
self.x = x
self.f = y
# At least for now, simplify this GP node such that a GP is either
# observed or latent. If it is observed, it doesn't take messages from
# children, actually, it should not even have children!
## # Pseudo for GPFA:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_rq(magnitude=.., lengthscale=.., alpha=..)
## f = NodeGPSet(0, [k1,k2,k3]) # assumes block diagonality
## # f = NodeGPSet(0, [[k11,k12,k13],[k21,k22,k23],[k31,k32,k33]])
## X = GaussianFromGP(f, [ [[t0,0],[t0,1],[t0,2]], [t1,0],[t1,1],[t1,2], ..])
## ...
## # Construct a sum of GPs if interested only in the sum term
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k = gp_cov_sum(k1, k2)
## f = NodeGP(0, k)
## f.observe(x, y)
## f.update()
## (mp, kp) = f.get_parameters()
## # Construct a sum of GPs when interested also in the individual
## # GPs:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_delta(magnitude=theta3)
## f = NodeGPSum(0, [k1,k2,k3])
## x = np.array([1,2,3,4,5,6,7,8,9,10])
## y = np.sin(x[0]) + np.random.normal(0, 0.1, (10,))
## # Observe the sum (index 0)
## f.observe((0,x), y)
## # Inference
## f.update()
## (mp, kp) = f.get_parameters()
## # Mean of the sum
## mp[0](...)
## # Mean of the individual terms
## mp[1](...)
## mp[2](...)
## mp[3](...)
## # Covariance of the sum
## kp[0][0](..., ...)
## # Other covariances
## kp[1][1](..., ...)
## kp[2][2](..., ...)
## kp[3][3](..., ...)
## kp[1][2](..., ...)
## kp[1][3](..., ...)
## kp[2][3](..., ...)
|
gpl-3.0
|