repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rsignell-usgs/python-training | web-services/xray_test.py | 1 | 1378 |
# coding: utf-8
# # Testing Xray on weather forecast model data
# In[18]:
import xray
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
get_ipython().magic(u'matplotlib inline')
# In[19]:
URL = 'http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/Best'
# In[20]:
ds = xray.open_dataset(URL)
# In[21]:
ds
# In[22]:
# select lat,lon region of interest
# note: slice(20.5,55.0) fails because lat is stored in descending order, so slice from high to low
dsloc = ds.sel(lon=slice(230.5,300.0),lat=slice(55.0,20.5))
# In[23]:
# select closest data to time of interest
#date = datetime.datetime(2015,7,15,3,0,0)
date = datetime.datetime.now()
ds_snapshot = dsloc.sel(time=date,time1=date,time2=date,method='nearest')
# In[24]:
ds.data_vars
# In[25]:
ds.coords
# In[26]:
ds.attrs
# In[27]:
t = ds_snapshot['Temperature_surface']
# In[28]:
t.shape
# In[29]:
plt.pcolormesh(t.lon.data,t.lat.data,t.data)
plt.title(t.name+pd.Timestamp(t.time.values).strftime(': %Y-%m-%d %H:%M:%S %Z %z'));
# In[30]:
# time series closest to specified lon,lat location
ds_series = ds.sel(lon=250.,lat=33.,method='nearest')
# In[31]:
# Select temperature and convert to Pandas Series
v_series = ds_series['Temperature_surface'].to_series()
# In[32]:
v_series.plot(title=v_series.name);
# In[33]:
ds_snapshot
# In[34]:
#ds_snapshot.to_netcdf('ds_snapshot.nc')
# In[ ]:
| cc0-1.0 |
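The notebook above hinges on two idioms: label-based box selection with slice (note the descending latitude) and nearest-neighbour time selection. Below is a minimal standalone sketch of the same pattern written against the current xarray package (the xray project was later renamed xarray); the OPeNDAP URL and the Temperature_surface variable name are taken from the notebook and assumed to still be served, and the single time= selection is an assumption, since the notebook's time/time1/time2 handling shows the time coordinate name can vary per variable.
import datetime
import xarray as xr  # successor package to xray

URL = 'http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/Best'
ds = xr.open_dataset(URL)

# Box selection: latitude is stored high-to-low, so slice from 55.0 down to 20.5.
dsloc = ds.sel(lon=slice(230.5, 300.0), lat=slice(55.0, 20.5))

# Nearest snapshot in time for the surface temperature field.
snap = dsloc['Temperature_surface'].sel(time=datetime.datetime.utcnow(),
                                        method='nearest')
print(snap.shape)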
GuessWhoSamFoo/pandas | pandas/tests/indexes/interval/test_construction.py | 2 | 15259 | from __future__ import division
from functools import partial
import numpy as np
import pytest
from pandas.compat import lzip
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical, CategoricalIndex, Float64Index, Index, Int64Index, Interval,
IntervalIndex, date_range, notna, period_range, timedelta_range)
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
import pandas.util.testing as tm
@pytest.fixture(params=[None, 'foo'])
def name(request):
return request.param
class Base(object):
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
get_kwargs_from_breaks to the expected format.
"""
@pytest.mark.parametrize('breaks', [
[3, 14, 15, 92, 653],
np.arange(10, dtype='int64'),
Int64Index(range(-10, 11)),
Float64Index(np.arange(20, 30, 0.5)),
date_range('20180101', periods=10),
date_range('20180101', periods=10, tz='US/Eastern'),
timedelta_range('1 day', periods=10)])
def test_constructor(self, constructor, breaks, closed, name):
result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
result = constructor(closed=closed, name=name, **result_kwargs)
assert result.closed == closed
assert result.name == name
assert result.dtype.subtype == getattr(breaks, 'dtype', 'int64')
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
@pytest.mark.parametrize('breaks, subtype', [
(Int64Index([0, 1, 2, 3, 4]), 'float64'),
(Int64Index([0, 1, 2, 3, 4]), 'datetime64[ns]'),
(Int64Index([0, 1, 2, 3, 4]), 'timedelta64[ns]'),
(Float64Index([0, 1, 2, 3, 4]), 'int64'),
(date_range('2017-01-01', periods=5), 'int64'),
(timedelta_range('1 day', periods=5), 'int64')])
def test_constructor_dtype(self, constructor, breaks, subtype):
# GH 19262: conversion via dtype parameter
expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
iv_dtype = IntervalDtype(subtype)
for dtype in (iv_dtype, str(iv_dtype)):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
def test_constructor_nan(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_subtype = np.float64
expected_values = np.array(breaks[:-1], dtype=object)
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
@pytest.mark.parametrize('breaks', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype='datetime64[ns]'),
np.array([], dtype='timedelta64[ns]')])
def test_constructor_empty(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_values = np.array([], dtype=object)
expected_subtype = getattr(breaks, 'dtype', np.int64)
assert result.empty
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
@pytest.mark.parametrize('breaks', [
tuple('0123456789'),
list('abcdefghij'),
np.array(list('abcdefghij'), dtype=object),
np.array(list('abcdefghij'), dtype='<U1')])
def test_constructor_string(self, constructor, breaks):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
constructor(**self.get_kwargs_from_breaks(breaks))
@pytest.mark.parametrize('cat_constructor', [
Categorical, CategoricalIndex])
def test_constructor_categorical_valid(self, constructor, cat_constructor):
# GH 21243/21253
if isinstance(constructor, partial) and constructor.func is Index:
# Index is defined to create CategoricalIndex from categorical data
pytest.skip()
breaks = np.arange(10, dtype='int64')
expected = IntervalIndex.from_breaks(breaks)
cat_breaks = cat_constructor(breaks)
result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
result = constructor(**result_kwargs)
tm.assert_index_equal(result, expected)
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
# invalid closed
msg = "invalid option for 'closed': invalid"
with pytest.raises(ValueError, match=msg):
constructor(closed='invalid', **filler)
# unsupported dtype
msg = 'dtype must be an IntervalDtype, got int64'
with pytest.raises(TypeError, match=msg):
constructor(dtype='int64', **filler)
# invalid dtype
msg = "data type 'invalid' not understood"
with pytest.raises(TypeError, match=msg):
constructor(dtype='invalid', **filler)
# no point in nesting periods in an IntervalIndex
periods = period_range('2000-01-01', periods=10)
periods_kwargs = self.get_kwargs_from_breaks(periods)
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with pytest.raises(ValueError, match=msg):
constructor(**periods_kwargs)
# decreasing values
decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
msg = 'left side of interval must be <= right side'
with pytest.raises(ValueError, match=msg):
constructor(**decreasing_kwargs)
class TestFromArrays(Base):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_arrays
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by IntervalIndex.from_arrays
"""
return {'left': breaks[:-1], 'right': breaks[1:]}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_arrays(data[:-1], data[1:])
# unequal length
left = [0, 1, 2]
right = [2, 3]
msg = 'left and right must have the same length'
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(left, right)
@pytest.mark.parametrize('left_subtype, right_subtype', [
(np.int64, np.float64), (np.float64, np.int64)])
def test_mixed_float_int(self, left_subtype, right_subtype):
"""mixed int/float left/right results in float for both sides"""
left = np.arange(9, dtype=left_subtype)
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
expected_left = Float64Index(left)
expected_right = Float64Index(right)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
tm.assert_index_equal(result.right, expected_right)
assert result.dtype.subtype == expected_subtype
class TestFromBreaks(Base):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_breaks
"""
return {'breaks': breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list('01234abcde'), ordered=True)
msg = ('category, object, and string subtypes are not supported '
'for IntervalIndex')
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
class TestFromTuples(Base):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_tuples
"""
if len(breaks) == 0:
return {'data': breaks}
tuples = lzip(breaks[:-1], breaks[1:])
if isinstance(breaks, (list, tuple)):
return {'data': tuples}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(tuples)}
return {'data': com.asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = 'IntervalIndex.from_tuples received an invalid item, 2'
with pytest.raises(TypeError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = 'IntervalIndex.from_tuples requires tuples of length 2, got {t}'
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
# tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(Base):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(params=[IntervalIndex, partial(Index, dtype='interval')],
ids=['IntervalIndex', 'Index'])
def constructor(self, request):
return request.param
def get_kwargs_from_breaks(self, breaks, closed='right'):
"""
converts intervals in breaks format to a dictionary of kwargs
specific to the format expected by the IntervalIndex/Index constructors
"""
if len(breaks) == 0:
return {'data': breaks}
ivs = [Interval(l, r, closed) if notna(l) else l
for l, r in zip(breaks[:-1], breaks[1:])]
if isinstance(breaks, list):
return {'data': ivs}
elif is_categorical_dtype(breaks):
return {'data': breaks._constructor(ivs)}
return {'data': np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
"""
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass
def test_constructor_errors(self, constructor):
# mismatched closed within intervals with no constructor override
ivs = [Interval(0, 1, closed='right'), Interval(2, 3, closed='left')]
msg = 'intervals must all be closed on the same side'
with pytest.raises(ValueError, match=msg):
constructor(ivs)
# scalar
msg = (r'IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with pytest.raises(TypeError, match=msg):
constructor(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with pytest.raises(TypeError, match=msg):
constructor([0, 1])
@pytest.mark.parametrize('data, closed', [
([], 'both'),
([np.nan, np.nan], 'neither'),
([Interval(0, 3, closed='neither'),
Interval(2, 5, closed='neither')], 'left'),
([Interval(0, 3, closed='left'),
Interval(2, 5, closed='right')], 'neither'),
(IntervalIndex.from_breaks(range(5), closed='both'), 'right')])
def test_override_inferred_closed(self, constructor, data, closed):
# GH 19370
if isinstance(data, IntervalIndex):
tuples = data.to_tuples()
else:
tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
expected = IntervalIndex.from_tuples(tuples, closed=closed)
result = constructor(data, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('values_constructor', [
list, np.array, IntervalIndex, IntervalArray])
def test_index_object_dtype(self, values_constructor):
# Index(intervals, dtype=object) is an Index (not an IntervalIndex)
intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
values = values_constructor(intervals)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
class TestFromIntervals(TestClassConstructors):
"""
Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
IntervalIndex constructor. Same tests as the IntervalIndex constructor,
plus a deprecation test. This class can be deleted once from_intervals is removed.
"""
@pytest.fixture
def constructor(self):
def from_intervals_ignore_warnings(*args, **kwargs):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
return IntervalIndex.from_intervals(*args, **kwargs)
return from_intervals_ignore_warnings
def test_deprecated(self):
ivs = [Interval(0, 1), Interval(1, 2)]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
IntervalIndex.from_intervals(ivs)
@pytest.mark.skip(reason='parent class test that is not applicable')
def test_index_object_dtype(self):
pass
| bsd-3-clause |
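The test module above repeatedly converts one set of breaks into the input formats of the different IntervalIndex constructors. A short hedged sketch of that equivalence on plain integer breaks (assuming a pandas version where these classmethods are public API, as in the file above):
import numpy as np
import pandas as pd

breaks = np.arange(5, dtype='int64')          # [0, 1, 2, 3, 4]
left, right = breaks[:-1], breaks[1:]

via_breaks = pd.IntervalIndex.from_breaks(breaks, closed='right')
via_arrays = pd.IntervalIndex.from_arrays(left, right, closed='right')
via_tuples = pd.IntervalIndex.from_tuples(list(zip(left, right)), closed='right')

# All three constructors describe the same four right-closed intervals.
assert via_breaks.equals(via_arrays)
assert via_breaks.equals(via_tuples)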
HeraclesHX/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
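One detail worth calling out in the __init__ above is the package-scoped warnings filter that forces DeprecationWarning to always be shown when it originates inside the package. Here is a small sketch of the same stdlib pattern, with 'mypackage' as a placeholder package name (sklearn passes its own __name__):
import re
import warnings

package_name = 'mypackage'  # placeholder; sklearn uses __name__ here

# Always show DeprecationWarning raised from package_name's subpackages.
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(package_name)))

# The newest filter is inserted at the front of the filter list.
print(warnings.filters[0])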
BMJHayward/infusionsoft_xpmt | examples/IS_dataplotting.py | 1 | 1550 |
# coding: utf-8
# In[3]:
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sbrn
# In[4]:
monthly_sales = pd.read_csv(r'S:\Program Files (x86)\Users\SERVER-MEDIA\Downloads\monthsales.csv')
# In[5]:
monthly_sales.head()
# In[6]:
monthly_sales['Amt sold']
# In[7]:
monthly_sales.loc[:, 'Amt sold'] = monthly_sales['Amt sold'].str.strip('AUD')
monthly_sales.loc[:, 'Amt sold'] = monthly_sales['Amt sold'].str.strip('-AUD')
monthly_sales.loc[:, 'Amt sold'] = monthly_sales['Amt sold'].str.replace(',', '')
monthly_sales.loc[:, 'Amt sold'] = monthly_sales['Amt sold'].astype(float)
monthly_sales.head()
# In[8]:
type(monthly_sales['Amt sold'][3])
# In[9]:
def numToMonth(num):
return{
1 : 'Jan',
2 : 'Feb',
3 : 'Mar',
4 : 'Apr',
5 : 'May',
6 : 'Jun',
7 : 'Jul',
8 : 'Aug',
9 : 'Sep',
10 : 'Oct',
11 : 'Nov',
12 : 'Dec'
}[num]
# In[10]:
monthly_sales = monthly_sales.drop(monthly_sales.index[22])
# In[11]:
monthly_sales.loc[:, 'month'] = monthly_sales['month'].map(numToMonth)
# In[12]:
monthly_sales['Amt sold'][:-1].plot.bar()
plt.ylabel('Monthly revenue')
plt.show()
# In[14]:
monthly_sales.plot.bar(x='month', y='Amt sold')
plt.show()
# In[ ]:
def salesplot(dframe, x_axis, y_axis):
if x_axis == 'month':
dframe.loc[:, x_axis] = dframe[x_axis].map(numToMonth)
dframe.plot.bar(x=x_axis, y=y_axis)
plt.show()
| mit |
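A hedged usage sketch for the salesplot helper defined at the end of the notebook above; the three-row frame is made up purely for illustration, and the call assumes salesplot and numToMonth from that notebook are already defined in the session.
import pandas as pd

toy = pd.DataFrame({'month': [1, 2, 3],
                    'Amt sold': [1200.0, 950.5, 1430.25]})

# salesplot maps numeric months to names via numToMonth, then draws a bar chart.
salesplot(toy, 'month', 'Amt sold')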
mugizico/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complement.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of distinct label values to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# divisors of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete them by randomly assigning the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
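# Editor's note (not part of scikit-learn): cross_val_predict above has no
# doctest, so here is a hedged usage sketch; iris and LogisticRegression are
# arbitrary stand-ins, and the integer cv is expanded by check_cv into a
# StratifiedKFold because the estimator is a classifier.
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> from sklearn.cross_validation import cross_val_predict
#     >>> iris = load_iris()
#     >>> preds = cross_val_predict(LogisticRegression(), iris.data,
#     ...                           iris.target, cv=5)
#     >>> preds.shape == iris.target.shape
#     True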
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
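# Editor's note (not part of scikit-learn): a matching hedged sketch for
# cross_val_score; with cv=5 the returned array holds one score per fold.
#
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> from sklearn.cross_validation import cross_val_score
#     >>> iris = load_iris()
#     >>> scores = cross_val_score(LogisticRegression(), iris.data,
#     ...                          iris.target, cv=5)
#     >>> scores.shape
#     (5,)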
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
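# Illustrative sketch (toy values, not executed): for a pairwise estimator the
# precomputed kernel is sliced on both axes, so the test-vs-train block has
# shape (n_test, n_train).
#
#     K = np.arange(25).reshape(5, 5)          # stand-in for a kernel matrix
#     test, train = [0, 1], [2, 3, 4]
#     K[np.ix_(test, train)].shape             # -> (2, 3)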
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3-fold cross-validation is used, or another object that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
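# Illustrative sketch (arrays are made up): with an integer, check_cv picks a
# splitter that matches the task.
#
#     y_clf = np.array([0, 1, 0, 1, 0, 1])
#     check_cv(3, X=np.zeros((6, 2)), y=y_clf, classifier=True)
#     # -> StratifiedKFold(y_clf, 3)
#     check_cv(3, X=np.zeros((6, 2)), classifier=False)
#     # -> KFold(6, 3)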
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals the p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
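# Illustrative usage sketch (X, y and the estimator choice are hypothetical):
#
#     from sklearn.svm import SVC
#     score, perm_scores, pvalue = permutation_test_score(
#         SVC(kernel='linear'), X, y, cv=5, n_permutations=100)
#     # A small pvalue suggests the score is unlikely to arise from
#     # permuted (i.e. uninformative) labels.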
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
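# Illustrative sketch (X and labels are made up): passing `stratify` keeps the
# class proportions roughly equal across the train and test splits.
#
#     labels = [0, 0, 0, 0, 1, 1, 1, 1]
#     X_tr, X_te, y_tr, y_te = train_test_split(
#         X, labels, test_size=0.5, stratify=labels, random_state=0)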
| bsd-3-clause |
Ernestyj/PyStudy | DataScience/python/td_query/test/test_data_manipulate.py | 1 | 19953 | # -*- coding: utf-8 -*-
import unittest
import os
import pickle
import pandas as pd
import numpy as np
from td_query import ROOT_PATH
from td_query.data_manipulate import data_manipulate_instance as instance
from teradata import UdaExec
class TestDataManipulate(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("**************************************** setUpClass ****************************************")
instance.init()
print(instance.teradata)
@classmethod
def tearDownClass(cls):
print("************************************** tearDownClass ***************************************")
def setUp(self):
print("****** setUp *******")
def tearDown(self):
print("***** tearDown *****")
def _example(self):
df = instance.query_sample()
# with open(ROOT_PATH + '/external/df_dispatch_bna.pickle', 'wb') as f: # save
# pickle.dump(df, f)
print(df)
def _calculate(self):
def percent(x, y):
return round(x/y*100, 2)
total = 115554
print(
percent(2877, total),
percent(3909, total),
percent(23030, total),
percent(18840, total),
percent(66898, total),
)
def _query(self):
query = '''select top 10 * from pp_scratch_risk.ms_auto_trend_us_bad;'''
df = instance.query(query)
print(df)
def _query_table_schema(self):
dest_db = "pp_scratch_risk"
dest_table = "ms_auto_trend_us2_1_3_100_100_1_1_1"
result_cursor = instance.teradata.execute("show select * from {}.{};".format(dest_db, dest_table))
last_row = result_cursor.fetchall()
print(last_row)
def _query_table_top_rows(self):
table = "pp_scratch_risk.ms_auto_trend_us_bad"
df = instance.query_table_top_rows(table)
print(df)
def _insert_to_table(self):
cols = ['id', 'name', 'phone']
data = [
(1, "jy", "1888"),
(2, "jy", "1999"),
]
df = pd.DataFrame.from_records(data, columns=cols)
df_name_is_jy = df[df['name']=='jy']
df = df.append([df_name_is_jy]*2, ignore_index=True)
print(pd.concat([df_name_is_jy]*2, ignore_index=True))
# print(df)
print("-------------")
database = "pp_scratch_risk"
table = "jy_test"
instance.insert_to_table(df, database, table)
query = '''select * from {}.{};'''.format(database, table)
result_df = instance.query(query)
print(result_df)
def _create_table_from_src_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us2_1_3'
dest_db = "pp_scratch_risk"
dest_table = "ms_auto_trend_us2_1_3_100_100_1_1_1"
instance.create_table_from_src_table_schema(src_db, src_table, dest_db, dest_table)
def _drop_table(self):
dest_db = "pp_scratch_risk"
dest_table = "ms_auto_trend_us2_1_3_100_100_1_1_1"
instance.drop_table(dest_db, dest_table)
def _transalte_100_63_22_14_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C')",
"(SELLER_CONSUMER_SEG == 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10008') & (amt2 != 'c-1h') & (amt2 != 'e-<50')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_30_20_3_4_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (IS_ULP_TRANS_T_F >= 0.5)",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (IS_ULP_TRANS_T_F < 0.5)",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_30_20_3_4_1_nloss(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10008') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string != '10002') & (amt2 != 'd-50') & (SUB_FLOW != 'MS Mobile Money Request - Invoicing') & (SUB_FLOW != 'MS Money Request') & (SUB_FLOW != 'MS Mobile Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'c-1h') & (dc_string == '10010')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10008') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (SELLER_CONSUMER_SEG == 'C') & (amt2 != 'c-1h') & (amt2 != 'd-50') & (amt2 != 'e-<50')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10008') & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (SUB_FLOW == 'MS Send Money Internal') & (dc_string == '10002')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (IS_ULP_TRANS_T_F >= 0.5)",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string != '10002') & (amt2 != 'd-50') & (SUB_FLOW != 'MS Mobile Money Request - Invoicing') & (SUB_FLOW != 'MS Money Request') & (SUB_FLOW != 'MS Mobile Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'c-1h') & (dc_string != '10010') & (SUB_FLOW == 'MS Send Money Internal') & (amt2 != 'a-1k') & (RCVR_CNTRY_CODE != 'CA ') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10008') & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 != 'a-1k') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 == 'b-5h')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10008') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (SELLER_CONSUMER_SEG != 'C') & (SUB_FLOW == 'MS Send Money Internal') & (amt2 == 'a-1k')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10008') & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (SUB_FLOW == 'MS Send Money Internal') & (dc_string != '10002') & (SELLER_SEG == '04 YS') & (dc_string == '10010')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10008') & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (SUB_FLOW == 'MS Send Money Internal') & (dc_string != '10002') & (SELLER_SEG == '04 YS') & (dc_string != '10010')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string != '10002') & (amt2 != 'd-50') & (SUB_FLOW != 'MS Mobile Money Request - Invoicing') & (SUB_FLOW != 'MS Money Request') & (SUB_FLOW != 'MS Mobile Money Request') & (IS_ULP_TRANS_T_F < 0.5) & (amt2 != 'c-1h') & (dc_string == '10003')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (IS_ULP_TRANS_T_F < 0.5)",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string == '10002') & (amt2 == 'a-1k')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW == 'MS Money Request - Invoicing')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string != '10002') & (amt2 != 'd-50') & (SUB_FLOW == 'MS Mobile Money Request - Invoicing') & (amt2 != 'c-1h') & (RCVR_CNTRY_CODE != 'CA ') & (dc_string != '<missing>') & (amt2 == 'a-1k')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (amt2 != 'e-<50') & (dc_string != '10002') & (amt2 != 'd-50') & (SUB_FLOW != 'MS Mobile Money Request - Invoicing') & (SUB_FLOW != 'MS Money Request') & (SUB_FLOW != 'MS Mobile Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'c-1h') & (dc_string != '10010') & (SUB_FLOW == 'MS Send Money Internal') & (amt2 == 'a-1k') & (RCVR_CNTRY_CODE != 'CA ') & (dc_string != '<missing>')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_164_89_5_8_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'c-1h') & (amt2 != 'e-<50')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_1000_500_100_50_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (dc_string != '12123') & (SELLER_CONSUMER_SEG == 'C') & (amt2 == 'a-1k') & (SELLER_SEG == '04 YS') & (dc_string == '10008') & (SUB_FLOW == 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (dc_string != '12123') & (SELLER_CONSUMER_SEG == 'C') & (amt2 == 'a-1k') & (SELLER_SEG == '04 YS') & (dc_string == '10008') & (SUB_FLOW != 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW == 'MS Mobile Cons App Send Money - Commercial') & (IS_ULP_TRANS_T_F >= 0.5)",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (dc_string != '12123') & (SELLER_CONSUMER_SEG == 'C') & (amt2 == 'a-1k') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (dc_string != '12123') & (SELLER_CONSUMER_SEG == 'C') & (amt2 == 'a-1k') & (SELLER_SEG == '04 YS') & (dc_string != '10008')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (SUB_FLOW != 'MS Money Request - Invoicing') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_1_1_1_1_1(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (SELLER_SEG == '04 YS') & (amt2 != 'e-<50') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'd-50')",
"(SELLER_CONSUMER_SEG == 'Y') & (SELLER_SEG == '04 YS') & (amt2 != 'e-<50') & (SUB_FLOW == 'MS Send Money Internal') & (IS_ULP_TRANS_T_F >= 0.5) & (RCVR_CNTRY_CODE != 'CA ') & (dc_string == '10008')",
"(SELLER_CONSUMER_SEG != 'Y') & (SELLER_SEG != '04 YS') & (amt2 != 'e-<50') & (SUB_FLOW != 'MS Mobile Cons App Send Money - Commercial') & (amt2 == 'a-1k') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>')",
"(SELLER_CONSUMER_SEG == 'Y') & (SELLER_SEG != '04 YS') & (SUB_FLOW != 'MS Send Money Internal') & (SUB_FLOW == 'MS Mobile Money Request - Invoicing API')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_mix(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS') & (dc_string == '10008') & (SUB_FLOW == 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS') & (dc_string == '10008') & (SUB_FLOW != 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (dc_string != '10005') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS') & (dc_string != '10008')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'c-1h') & (amt2 != 'e-<50')",
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_tpv(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS') & (SUB_FLOW == 'MS Send Money Internal') & (dc_string == '10008')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG == '04 YS') & (SUB_FLOW != 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (SUB_FLOW != 'MS Mobile Money Request') & (SUB_FLOW != 'MS Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10002') & (amt2 != 'c-1h')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (SUB_FLOW != 'MS Mobile Money Request') & (SUB_FLOW != 'MS Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10002') & (amt2 != 'c-1h') & (SUB_FLOW == 'MS Send Money Internal') & (RCVR_CNTRY_CODE != 'CA ') & (dc_string != '12122') & (dc_string != '10010') & (SELLER_SEG != '04 YS') & (amt2 != 'e-<50')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 == 'a-1k') & (SELLER_CONSUMER_SEG == 'C') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (IS_ULP_TRANS_T_F >= 0.5)"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _transalte_tpv2(self):
rules = [
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (amt2 != 'e-<50') & (SELLER_CONSUMER_SEG == 'C') & (dc_string != '12123') & (amt2 == 'a-1k') & (SELLER_SEG == '04 YS') & (SUB_FLOW == 'MS Send Money Internal') & (dc_string == '10008')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (amt2 != 'e-<50') & (SELLER_CONSUMER_SEG == 'C') & (dc_string != '12123') & (amt2 == 'a-1k') & (SELLER_SEG == '04 YS') & (SUB_FLOW != 'MS Send Money Internal')",
"(SELLER_CONSUMER_SEG != 'Y') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '<missing>') & (amt2 != 'c-1h') & (dc_string != '10005') & (amt2 != 'd-50') & (amt2 != 'e-<50') & (SELLER_CONSUMER_SEG == 'C') & (dc_string != '12123') & (amt2 == 'a-1k') & (SELLER_SEG != '04 YS')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (SUB_FLOW != 'MS Mobile Money Request') & (SUB_FLOW != 'MS Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string == '10002') & (amt2 != 'c-1h')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string != '10008') & (SUB_FLOW != 'MS Mobile Money Request') & (SUB_FLOW != 'MS Money Request') & (IS_ULP_TRANS_T_F >= 0.5) & (dc_string != '10002') & (amt2 != 'c-1h') & (SUB_FLOW == 'MS Send Money Internal') & (RCVR_CNTRY_CODE != 'CA ') & (dc_string != '12122') & (dc_string != '10010') & (SELLER_SEG != '04 YS') & (amt2 != 'e-<50')",
"(SELLER_CONSUMER_SEG == 'Y') & (dc_string == '10008') & (amt2 == 'a-1k') & (IS_ULP_TRANS_T_F >= 0.5)"
]
result = instance.translate_hyperloop_rules_to_sql(rules)
print(result)
def _duplicate_rows_to_new_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us2_1_3'
dest_db = "pp_scratch_risk"
weight_a = 900
weight_b = 400
weight_c = 9
weight_d = 16
weight_e = 1
dest_table = "ms_auto_trend_us2_1_3_{}_{}_{}_{}_{}".format(weight_a, weight_b, weight_c, weight_d, weight_e)
instance.duplicate_rows_to_new_table(src_db, src_table, dest_db, dest_table, weight_a, weight_b, weight_c, weight_d, weight_e)
def _duplicate_rows_from_bad_and_sample_from_good_into_new_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us'
dest_db = "pp_scratch_risk"
bad_scale = 1
good_scale = 3
weight_a = 52
weight_b = 16
weight_c = 23
weight_d = 5
weight_e = 4
dest_table = "ms_auto_trend_us_{}_{}__{}_{}_{}_{}_{}_v2".format(bad_scale, good_scale, weight_a, weight_b, weight_c, weight_d, weight_e)
instance.duplicate_rows_from_bad_and_sample_from_good_into_new_table(src_db, src_table, dest_db, dest_table,
bad_scale, good_scale,
weight_a, weight_b, weight_c, weight_d, weight_e)
def _generate_hl_job_json(self):
training_table = "ms_auto_trend_us2_1_3"
testing_table = "ms_auto_trend_us_t"
instance.generate_hl_job_json(training_table, testing_table, template_name='hl_job_template_na.json')
def _add_weight_col_to_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us2_1_3'
# weight_a = 0.312
# weight_b = 0.140
# weight_c = 0.011
# weight_d = 0.011
# weight_e = 0.001
weight_a = 10 * 30
weight_b = 8 * 20
weight_c = 4.6 * 3
weight_d = 3.7 * 4
weight_e = 1 * 1
instance.add_weight_col_to_table(src_db, src_table, weight_a, weight_b, weight_c, weight_d, weight_e)
def _update_weight_col_in_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us2_1_3'
src_col = 'PMT_USD_AMT'
instance.update_weight_col_in_table(src_db, src_table, src_col)
def _update_custom_weight_col_in_table(self):
src_db = "pp_scratch_risk"
src_table = 'ms_auto_trend_us2_1_3'
src_col = 'PMT_USD_AMT'
instance.update_custom_weight_col_in_table(src_db, src_table, src_col) | apache-2.0 |
Cignite/primdb | primdb_app/plot/chargecount.py | 2 | 1570 | '''
Counting the charge statistics from the database data.
'''
import psycopg2
import numpy.numarray as na
import matplotlib.pyplot as plt
#establish connection with the postgres server with the given configuration
conn = psycopg2.connect(host="localhost",user="primuser",password="web",database="primdb")
charges = [0,1,2,3,4]
chargecount = []
cur = conn.cursor()
for c in charges:
# fetch the number of records for each charge state from 0 to 4
# and append each count to the chargecount list for plotting
cur.execute("SELECT chargestate FROM primdb_app_selectedion where chargestate ='%d'" %(c))
chargecount.append(cur.rowcount)
# fetch the number of records whose charge state is greater than 4
# and append the count to the chargecount list for plotting
cur.execute("SELECT chargestate FROM primdb_app_selectedion where chargestate >'4'")
chargecount.append(cur.rowcount)
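# A minimal alternative sketch (same table assumed): the per-charge counts
# above could also be computed server-side with a parameterized COUNT query,
# e.g.:
#     cur.execute("SELECT COUNT(*) FROM primdb_app_selectedion WHERE chargestate = %s", (c,))
#     count = cur.fetchone()[0]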
labels = ["0", "1","2", "3","4",">5"]
maxitem = max(chargecount) +1000
colors = ['r','g', 'm','c','k','w']
xlocations = na.array(range(len(chargecount)))+0.5
width = 0.7
plt.bar(xlocations, chargecount, width=width, color=colors)
plt.xticks(xlocations+ width/2, labels)
plt.xlim(0, xlocations[-1]+width*2)
plt.xlabel("Charge")
plt.ylabel("Count per charge")
for x,y in zip(xlocations,chargecount):
plt.text(x+0.4, y, '%.2d' % y, ha='center', va= 'bottom')
#change the directory according to your application path.
plt.savefig(r'D:/Dropbox/Dropbox/primdb/assets/img/statistics2.png', dpi=100, transparent=True)
| agpl-3.0 |
domagalski/pocketcorr | scripts/pocketcorr_adc.py | 1 | 9505 | #!/usr/bin/env python2
################################################################################
## This script is for a simple ADC capture to test a pocket correlator.
## Copyright (C) 2014 Rachel Simone Domagalski: [email protected]
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import sys
import argparse
import numpy as np
import pocketcorr as pc
from numpy import fft
#BRAM_SIZE = 4 << 11
BRAM_SIZE = 4 << 10
BRAM_WIDTH = 32
NBITS = 8
class ADC(pc.POCO):
def adc_read(self, start_pol, demux=1, capture='adc'):
"""
Read the time domain signals out of a BRAM.
"""
#XXX need to do demux2 eq blocks
# Read the register containing the ADC captures.
if demux == 2 and capture == 'pfb':
names = ['pfb_real', 'pfb_imag']
# XXX data type should be <i4 after recompile
real = np.fromstring(self.read(names[0], BRAM_SIZE), '>i4')
imag = np.fromstring(self.read(names[1], BRAM_SIZE), '>i4')
pfb_read = np.zeros(BRAM_SIZE / 4, dtype=np.complex64)
pfb_read.real = real# map(twos_comp, real)
pfb_read.imag = imag#map(twos_comp, imag)
return pfb_read
else:
read_size = BRAM_SIZE
nbits = demux*NBITS
npols = BRAM_WIDTH/nbits
first = str(start_pol)
last = str(start_pol + npols - 1)
adc = capture + '_'*int(demux>1)
# I feel one day I'm going to look back on this and shake my head.
if self.poco == 'spoco12':
if adc == 'pfb':
print 'ERROR: This is messed up on the FPGA.'
sys.exit(1)
elif adc == 'fft':
last = str(start_pol + 6)
read_size *= 2
npols /= 2
elif adc == 'eq':
last = str(start_pol + 1)
read_size *= 2
adc += '_cap_'
# There is a sync pulse somewhere in the data
if adc[:2] == 'eq' or adc[:3] == 'fft':
sync = self.read(adc + 'sync', read_size).find(chr(1)) / 4
concat = self.read(adc + '_'.join([first, last]), read_size)
# Get the formatting for the data.
if adc[:3] != 'fft':
shape = (read_size/(npols*demux), npols*demux)
fmt = '>i1'
else:
shape = (read_size/(npols*demux*2), npols*demux)
fmt = '>i2'
# Parse the data into usable values.
adc_read = np.fromstring(concat, fmt).reshape(*shape)
if adc[:2] == 'eq' or adc[:3] == 'fft':
adc_read = adc_read[sync:sync+adc_read.shape[0]/2]
adc_read = list(adc_read.transpose()[::-1])
if adc[:3] == 'fft':
adc_read = adc_read[0] + 1j*adc_read[1]
split = len(adc_read)/2
adc_read = [adc_read[:split], adc_read[split:]]
if demux == 2:
adc_read = [np.r_[adc_read[2*i],adc_read[2*i+1]]
for i in range(len(adc_read)/2)]
for i in range(len(adc_read)):
reordered = np.copy(adc_read[i]).reshape(2, shape[0])
reordered = reordered.transpose().flatten()
adc_read[i] = np.copy(reordered)
# Return the data as a dictionary.
if capture == 'adc_cap':
capture = 'adc'
names = [capture + str(i) for i in range(start_pol, start_pol+npols)]
if adc[:3] == 'fft':
names = [capture + str(i) for i in [start_pol, start_pol+6]]
return zip(names, adc_read)
def twos_comp(num32, nbits=18):
"""
Perform the two-s compiment of some n-bit number.
"""
bit_sel = 2**nbits - 1
neg_bit = 1 << nbits - 1
num32 = num32 & bit_sel
if num32 & neg_bit:
return -(((1 << 32) - num32) & bit_sel)
else:
return num32
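# Worked example for the helper above (values chosen for illustration):
# with nbits=4 the raw pattern 0b1111 decodes to -1, while 0b0111 stays +7.
#
#     twos_comp(0b1111, nbits=4)   # -> -1
#     twos_comp(0b0111, nbits=4)   # -> 7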
if __name__ == '__main__':
# Grab options from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ip-roach', dest='roach', required=True,
help='Hostname/ip address of the ROACH.')
parser.add_argument('-N', '--npol',
default=8,
type=int,
help='Number of antennas in the rpoco design.')
parser.add_argument('-c', '--capture',
help='Block to capture from (SNAP only)')
parser.add_argument('-d', '--demux', default=1, type=int,
help='Demux mode of the ADC.')
parser.add_argument('-o', '--output-file',
dest='outfile',
help='NPZ file to save data to.')
parser.add_argument('-a', '--antennas', nargs='+',
help='Antennas to plot.')
parser.add_argument('-f', '--fft', action='store_true',
help='Run an FFT on the data.')
parser.add_argument('-S', '--samp-rate', default=200e6, type=float,
help='Samping rate of the ADC (for plots).')
args = parser.parse_args()
# Make sure that the user specified something to do.
if args.outfile is None and args.antennas is None:
print 'ERROR: Nothing to do.'
sys.exit(1)
if args.outfile is not None and args.antennas is None and args.fft:
print 'ERROR: This script only stores raw data.'
sys.exit(1)
# Connect to the ROACH.
poco = ADC(args.roach)
poco.wait_connected()
spoco12 = False
modelist = pc.mode_int2list(poco.read_int('ping'))
if modelist[0] == 'snap' and modelist[3] == 12:
spoco12 = True
poco.poco = 'spoco12'
if args.demux == 1 and args.capture is None:
if spoco12:
cap = 'adc'
else:
cap = 'new_raw'
else:
cap = args.capture
if spoco12: # See the else for description of the sequence
poco.write_int(cap + '_cap_raw_trig', 1)
poco.write_int(cap + '_cap_raw', 1)
poco.write_int(cap + '_cap_raw', 0)
poco.write_int(cap + '_cap_raw_trig', 1)
else:
# Enable the ADC capture
poco.write_int(cap + '_capture_trig', 1)
# capture the ADC.
poco.write_int(cap + '_capture', 1)
poco.write_int(cap + '_capture', 0)
# Turn off ADC capture.
poco.write_int(cap + '_capture_trig', 0)
# Collect data and store it as a dictionary
adc_capture = []
nbits = args.demux * NBITS
if cap == 'pfb' and not spoco12:
pfb_capture = poco.adc_read(0, args.demux, cap)
else:
npol = args.npol
step_size = BRAM_SIZE/nbits
if cap == 'eq' or cap == 'fft':
npol /= 2
step_size = 1
for i in range(0, npol, step_size):
adc_capture += poco.adc_read(i, args.demux, cap)
adc_capture = dict(adc_capture)
# Now we either save or plot the data.
if args.outfile is not None:
np.savez(args.outfile, **adc_capture)
# Set this for plotting
#if args.demux == 1 and args.capture is None:
# cap = 'adc'
if args.antennas is not None:
import matplotlib.pyplot as plt
if cap == 'pfb' and not spoco12:
plt.plot(np.abs(pfb_capture)**2)
else:
time_axis = np.arange(BRAM_SIZE*nbits/BRAM_WIDTH) * 1e6 / args.samp_rate
freq_axis = fft.fftfreq(len(time_axis), 1e6 / args.samp_rate)
freq_axis = freq_axis[:len(freq_axis)/2] # ADC data is real
for ant in args.antennas:
plt.figure()
name = cap + ant
sample = adc_capture[name]
if args.fft or cap == 'fft':
if args.fft:
pspec = np.abs(fft.fft(sample)[:len(sample)/2])**2
pspec = 10*np.log10(pspec / np.max(pspec))
plt.plot(freq_axis, pspec)
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (dB)')
else:
pspec = np.abs(sample)**2
plt.plot(pspec)
plt.xlabel('Frequency (chan)')
plt.ylabel('Power')
plt.title(name)
#plt.axis([0, freq_axis[-1], np.min(pspec), 0])
else:
plt.plot(time_axis, sample)
plt.xlabel('Time ($\mu s$)')
plt.ylabel('Amplitude (ADU)')
plt.title(name)
plt.axis([0, time_axis[-1], np.min(sample)-2, np.max(sample)+2])
plt.show()
| gpl-3.0 |
tawsifkhan/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
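# Quick illustration of the point made in the module docstring (the labelings
# below are random stand-ins): the adjusted Rand index of two unrelated
# labelings sits near 0.0, while the unadjusted V-measure does not.
#
#     rng = np.random.RandomState(0)
#     a, b = rng.randint(0, 10, 100), rng.randint(0, 10, 100)
#     metrics.adjusted_rand_score(a, b)   # ~ 0.0
#     metrics.v_measure_score(a, b)       # clearly above 0.0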
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
ktbartolotta/periodically | pwords.py | 1 | 116625 | def get_pwords():
return ['ac','acacias','accept','acceptance','acceptances','accepts','access','accesses','accession','accessions','acclamation','accountancy','accra','accretion','accretions','accrual','accruals','accrues','accuracy','accurate','accurateness','accusation','accusations','accusatives','accuse','accuser','accusers','accuses','acerbic','aces','ache','achebe','achernar','aches','acheson','achier','achoo','achy','acne','aconite','aconites','acorn','acorns','acosta','acoustic','acoustical','acoustics','acre','acreages','acres','action','actions','activates','activation','actives','activism','activities','acuff','acute','acuteness','acutes','ag','agar','agassi','agates','ages','agism','agitate','agitates','agitation','agitations','agnes','agnew','agni','agnostic','agnosticism','agnostics','ago','agonies','agony','agra','agrees','aguirre','agustin','al','alamo','alar','alaric','alas','alb','alba','albacore','albacores','albany','alberio','alberta','albino','albinos','albion','albireo','albs','alcestis','alcoholic','alcoholics','alcoholism','alcoves','alcuin','alcyone','alertly','ales','alfonso','alfresco','alga','algebra','algebraic','algebras','algenib','alhambra','alhena','ali','alias','aliases','alibi','alibis','alice','alimony','aline','alines','alioth','alison','alistair','alkali','alkalies','alkaline','alkalis','allah','allan','allay','allays','allies','allison','alliteration','alliterations','allure','allures','allusion','allusions','alluvial','almond','almonds','alnitak','aloes','alohas','alone','aloof','aloofness','alpaca','alpacas','alphas','alphecca','alphonse','alphonso','alpine','alpo','alps','also','alsop','alta','altai','altaic','altair','alteration','alterations','alternates','alternation','alternations','alternatives','altho','altiplano','alumni','alumnus','alvaro','alvin','alyce','alyson','am','amaru','amass','amasses','amaterasu','amati','amber','ambition','ambitions','ambitious','ambitiousness','ambulance','ambulances','ambush','ambushes','america','american','americana','americanism','americanisms','americans','americas','amerind','amerinds','ameslan','amharic','amiga','amir','amirs','amish','amiss','ammo','amnesiac','amnesiacs','amnesties','amniocenteses','amniocentesis','amoco','amok','amos','amp','amparo','amperes','amphibious','amplification','amplifications','amplifier','amplifiers','amplifies','amplify','amps','amputate','amputates','amputation','amputations','amputees','amuck','amundsen','amuse','amuses','amy','ar','aral','aramco','ararat','arboreal','arboreta','arc','arcane','arch','archbishop','archbishopric','archbishoprics','archbishops','archer','archers','archery','arches','archives','archness','arcs','arctic','arctics','ares','argentina','argentine','arias','aries','arieses','arise','arisen','arises','arius','ark','arks','arline','armonk','arneb','arno','aron','arose','around','arousal','arouse','arouses','arrange','arranges','array','arrays','arrears','arsenal','arsenals','arsenic','arson','artichokes','articles','articulate','articulateness','articulates','articulation','articulations','artier','artifice','artificer','artificers','artifices','artificial','artiste','artistes','artistic','artless','artlessness','aruba','as','ascend','ascends','ascension','ascensions','ascertain','ascertains','ascetic','asceticism','ascetics','ascribe','ascribes','ascription','ash','ashcan','ashcans','ashe','ashen','ashes','ashier','ashore','ashy','asiatic','asiatics','asimov','asinine','ask','asks','asocial','asp','asparagus','asperities','aspersion','aspersions','aspic','aspics','
aspirate','aspirates','aspiration','aspirations','aspire','aspires','aspirin','aspirins','asps','ass','assam','assassin','assassinate','assassinates','assassination','assassinations','assassins','assertion','assertions','asses','assess','assesses','asseverates','assisi','assistance','associates','association','associations','assonance','assuages','assurance','assurances','assure','assures','astaire','astana','astern','asunder','at','atari','atheism','atheistic','athena','athens','atherosclerosis','atkins','atkinson','atlanta','atlantes','atlantic','atlantis','atlas','atlases','atmosphere','atmospheres','atmospheric','atonal','atone','atones','atop','atreus','ats','attach','attack','attacker','attackers','attacks','attain','attains','attend','attender','attends','attention','attentions','attenuates','attenuation','attestation','attestations','attic','attica','attics','attila','attire','attires','atypical','au','aubrey','auburn','auckland','auction','auctioneer','auctioneers','auctions','auk','auks','aura','auras','aurelio','aurelius','aureola','aureolas','auspice','auspices','auspicious','auspiciousness','aussies','austen','austere','austin','austins','autism','autistic','b','ba','baal','baas','baath','babar','babbage','babe','babes','babier','babies','baboon','baboons','babushkas','baby','babyish','babysat','baccalaureates','bacchus','bach','back','backache','backaches','backbite','backbites','backbone','backbones','backer','backers','backfire','backfires','backhoes','backlash','backlashes','backpack','backpacker','backpackers','backpacks','backs','backslapper','backslappers','backslash','backspace','backspaces','backspin','backstage','backstairs','backup','backups','backus','backwash','backwater','backwaters','bacon','baffin','bag','baggage','bagpipes','bags','baguio','bah','bahamas','bahrain','baikal','bailiff','bailiffs','bailiwick','bailiwicks','baker','bakeries','bakers','bakery','bakes','baku','bakunin','balalaikas','balance','balances','balanchine','balaton','balconies','balcony','bales','bali','balinese','balk','balkhash','balkier','balks','balky','ballistic','ballistics','baloney','balsam','balsams','balsas','baltic','baltimore','baluchistan','bambi','bamboo','bamboos','ban','banach','banal','banalities','banana','bananas','band','bandages','bandier','bandies','bandolier','bandoliers','bands','bandstand','bandstands','bandwagon','bandwagons','bandy','bane','banes','bani','banish','banishes','bank','bankbook','bankbooks','banker','bankers','banknote','banknotes','bankrupt','bankruptcies','bankruptcy','bankrupts','banks','banneker','banner','banners','banns','bans','banshees','baobab','baobabs','baptism','baptismal','baptisms','baptiste','bar','barabbas','barack','barb','barbara','barbaric','barbarism','barbarisms','barbarities','barbarous','barbary','barbecues','barber','barbers','barbershop','barbershops','barbra','barbs','barclay','bare','bareback','bareness','barer','bares','barf','barfs','bargain','bargainer','bargains','barge','barges','bark','barker','barkers','barks','barn','barnabas','barnaby','barnacles','barnes','barney','barns','baron','baroness','baronesses','baronial','barons','barrack','barracks','barrage','barrages','barren','barrener','barrenness','barrens','barrera','bars','bartender','bartenders','barth','baruch','baryshnikov','basal','base','baseline','baselines','baseness','baser','bases','bash','bashes','basho','basic','basics','basilica','basilicas','basin','basins','basis','bask','basks','basra','bass','basses','bassi','basso','bassoon','bassoons','bassos','ba
ste','bastes','bastion','bastions','bat','batch','batches','bate','bates','bath','bathe','bather','bathers','bathes','bathhouse','bathhouses','bathos','baths','bathsheba','batik','batiks','batista','baton','batons','bats','battalion','battalions','batten','battens','battier','battles','battleship','battleships','batu','bauer','bauhaus','bawdy','bay','bayer','bayes','bayonne','bayou','bayous','bayreuth','bays','baywatch','be','beach','beaches','beacon','beacons','beam','beams','bear','bearer','bearers','bearish','bearnaise','bears','bearskin','bearskins','beastlier','beastliness','beastly','beat','beater','beaters','beatific','beatification','beatifications','beatifies','beatify','beatnik','beatniks','beats','beau','beaus','beauteous','beauties','beautification','beautifier','beautifiers','beautifies','beautify','beauvoir','bebop','bebops','because','beck','becker','beckon','beckons','becks','becky','beer','beers','bees','before','began','behalf','behalves','behemoth','behemoths','behind','behinds','behooves','beige','bekesy','bela','belarus','belau','belay','belays','belies','belushi','ben','benacerraf','benares','bench','benches','bend','bender','bends','beneath','benefaction','benefactions','benefice','beneficence','beneficently','benefices','beneficial','beneficiaries','beneficiary','bengali','benin','benita','benny','benson','bentham','berate','berates','berber','berbers','berenice','bergen','bergerac','beriberi','berkshire','berkshires','berlin','berliner','berlins','bern','bernays','bernbach','berne','bernice','bernini','bernstein','berra','berserk','berta','berth','berths','besmirch','besmirches','bess','bestial','bestiaries','bestiary','beta','betakes','betas','beth','bethink','bethinks','bethune','beulah','beverages','bevies','bevy','bewares','bewitch','bewitches','beyer','beyond','bhopal','bhutan','bi','bias','biases','bib','biblical','bibs','bic','bicameral','bicentennial','bicentennials','bicep','biceps','bicepses','bicker','bickers','bicycles','bier','bierce','biers','bifocal','bifocals','bigamous','biker','bikers','bikes','bikini','bikinis','biko','bilious','bimonthlies','bin','binaries','binary','bind','binder','binderies','binders','bindery','binds','binge','binges','binnacles','bins','bioko','bionic','biophysics','biopsies','biopsy','biosphere','biospheres','bipartite','biplane','biplanes','biracial','birch','birches','birdbath','birdbaths','biro','birth','birthplace','birthplaces','birthrate','birthrates','births','biscay','biscayne','bisection','bisections','bishop','bishopric','bishoprics','bishops','bismarck','bismark','bismuth','bison','bisons','bissau','bitch','bitches','bitchier','bitchy','bite','bites','bivalves','bivouac','bivouacs','bk','blab','blabbermouth','blabbermouths','blabs','black','blackburn','blacker','blackish','blackness','blacks','blacksmith','blacksmiths','blackthorn','blackthorns','blah','blaine','blair','blanca','blanch','blanche','blanches','bland','blander','blandness','blank','blanker','blankness','blanks','blare','blares','blasphemous','blatantly','blavatsky','blind','blinder','blinders','blindness','blinds','blink','blinker','blinkers','blinks','blip','blips','bliss','blither','blu','blubber','blubbers','blucher','bluer','blues','bluff','bluffer','bluffers','bluffs','bluish','blunder','blunderbuss','blunderbusses','blunderer','blunderers','blunders','bluntly','blurb','blurbs','blush','blusher','blushers','blushes','boar','boars','boas','boat','boater','boaters','boats','bob','bobbi','bobbies','bobbin','bobbins','bobby','bobcat','bobcats','bobo
link','bobolinks','bobs','bobwhite','bobwhites','boccaccio','body','boer','boers','bogey','bogeys','boise','bola','bolas','bolivar','bonaparte','bonbon','bonbons','bond','bonds','bone','boner','boners','bones','boney','boneyer','bonfire','bonfires','bonier','bonita','bonkers','bonn','bonner','bonnier','bonny','bono','bonus','bonuses','bony','boo','boob','boobies','boobs','booby','book','bookcase','bookcases','booker','bookies','bookish','books','bookshop','bookshops','boon','boondocks','boone','boons','boos','bootblack','bootblacks','bootees','booth','booths','booties','bootless','bop','bops','bore','boreas','bores','born','boru','bosch','bose','bosh','bosporus','boss','bosses','bossier','bossiness','bossy','bosun','bosuns','botanical','botany','botch','botches','both','bother','bothers','bounce','bouncer','bouncers','bounces','bouncier','bouncy','bound','boundaries','boundary','bounder','bounders','bounds','bounteous','bounties','bourbon','bovary','bovine','bovines','bow','bower','bowers','bowery','bows','boxes','boy','boyer','boyish','boyishness','boys','br','bra','brace','braces','brackish','brady','brag','brags','brahe','brain','brainier','brains','brainteaser','brainteasers','brainwash','brainwashes','brainy','braise','braises','brakes','brampton','bran','branch','branches','brand','brandi','brandies','brandish','brandishes','brando','brandon','brands','brandy','bras','brash','brasher','brashness','brass','brasses','brassier','brassieres','brassy','brat','brats','brattain','brattier','braver','bravery','braves','bravo','bravos','bravura','bravuras','brawn','brawnier','brawniness','brawny','bray','brays','breach','breaches','breastbone','breastbones','breath','breathe','breather','breathers','breathes','breathier','breaths','breathy','brennan','brenner','brethren','breviaries','breviary','brew','brewer','breweries','brewers','brewery','brews','briar','briars','bribe','bribery','bribes','brice','brick','brickbat','brickbats','bricklayer','bricklayers','bricks','brier','briers','brigand','brigands','brigantine','brigantines','brine','brinier','brink','brinks','briny','brisbane','brisk','brisker','briskness','brisks','bristles','bristlier','bristly','britain','britannic','britannica','britches','british','britisher','brno','broach','broaches','broccoli','brochure','brochures','brock','brogan','brogans','broker','brokerages','brokers','bronchi','bronchial','bronchitis','broncho','bronchos','bronchus','bronco','broncos','bronson','bronte','brooch','brooches','brook','brooks','broth','brother','brotherliness','brothers','broths','brow','browbeat','browbeats','brown','browne','browner','brownies','brownish','browns','brows','browse','browser','browsers','browses','brubeck','bruce','bruckner','bruin','bruins','bruise','bruiser','bruisers','bruises','brunch','brunches','brunei','bruno','brunswick','brush','brushes','brusk','brusker','bruskness','brutalities','brute','brutes','brutish','bryce','brynner','bryon','bubblier','buber','buccaneer','buccaneers','buck','buckner','buckram','bucks','buckskin','buckskins','buckwheat','bucolic','bucolics','buff','buffalo','buffaloes','buffalos','buffer','buffers','buffoon','buffoonery','buffoons','buffs','buffy','bugaboo','bugaboos','buick','bukharin','bulrush','bulrushes','bun','bunch','bunche','bunches','bundesbank','bunin','bunion','bunions','bunk','bunker','bunkers','bunkhouse','bunkhouses','bunks','bunnies','bunny','buns','bunsen','buoy','buoys','bureau','bureaucracies','bureaucracy','bureaucrat','bureaucratic','bureaucrats','bureaus','burn','burner','b
urners','burnish','burnishes','burnoose','burnooses','burnous','burnouses','burns','burundi','bus','busbies','busboy','busboys','busby','busch','buses','bush','bushes','bushier','bushiness','bushwhack','bushwhacker','bushwhackers','bushwhacks','bushy','busier','busies','business','businesses','buss','busses','bustles','busy','busybody','busyness','butane','butch','butcher','butcheries','butchers','butchery','butches','butler','butlers','buy','buyer','buyers','buys','by','byers','byes','bylaw','bylaws','byline','bylines','bypass','bypasses','byplay','bystander','bystanders','byte','bytes','c','ca','cab','cabal','cabals','cabana','cabanas','cabbage','cabbages','cabbies','cabby','cabin','cabins','caboose','cabooses','cabral','cabrera','cabrini','cabs','cacao','cacaos','cache','caches','cacophonies','cacophonous','cacophony','cacti','caesar','caesars','caesura','caesuras','caffeine','caftan','caftans','cage','cages','cagey','cageyness','cagier','caginess','cagney','cagy','cain','cains','cairn','cairns','cairo','caisson','caissons','caitlin','cakes','cal','calabash','calabashes','calais','calamine','calamities','calcifies','calcify','calcine','calcines','calcite','calculate','calculates','calculation','calculations','calculi','calculus','calculuses','calf','calfs','calfskin','calhoun','cali','caliban','caliber','calibers','calibrate','calibrates','calibration','calibrations','calico','calicoes','calicos','calif','califs','caliper','calipers','caliph','caliphates','caliphs','calk','calks','callao','callas','calliopes','calliper','callipers','callus','calluses','calmness','caloocan','calumniates','calumnies','calumny','calvary','calves','calvin','calvinism','calvinisms','calvinistic','calyces','calypso','calypsos','calyxes','cam','camacho','camber','cambers','cambric','cameras','cameron','cameroon','cameroons','camouflage','camouflages','camp','campanili','camper','campers','campfire','campfires','campier','campinas','campos','camps','campsite','campsites','campus','campuses','campy','cams','camus','can','canal','canals','canaries','canary','canasta','canaveral','canberra','cancan','cancans','cancelation','cancer','cancerous','cancers','cancun','candice','candies','candy','cane','canes','canine','canines','canker','cankerous','cankers','cannabis','cannabises','canneries','cannery','cannes','cannibal','cannibalism','cannibalistic','cannibals','cannier','canniness','cannon','cannons','canny','canoes','canon','canonical','canons','canopies','canopus','canopy','cans','cantankerous','cantankerousness','cantata','cantatas','canticles','canute','canvas','canvasback','canvasbacks','canvases','canvass','canvasser','canvassers','canvasses','canyon','canyons','cap','capabilities','capablanca','capacious','capaciousness','capacitance','capacities','caparison','caparisons','caper','capers','capes','caph','capitalism','capitalistic','capon','capone','capons','capote','cappuccino','cappuccinos','capra','capri','caprice','caprices','capricious','capriciousness','capricorn','capricorns','caps','capstan','capstans','capt','captain','captaincies','captaincy','captains','caption','captions','captious','captivates','captivation','captives','captivities','capture','captures','capuchin','car','cara','caracalla','caracas','carafe','carafes','carapace','carapaces','carat','carats','carbine','carbines','carbon','carbonate','carbonates','carbonation','carboniferous','carbons','carboy','carboys','carbuncles','carcass','carcasses','carcinogen','carcinogenic','carcinogenics','carcinogens','care','career','careers','cares','car
ess','caresses','caretaker','caretakers','careworn','carey','carib','caribou','caribous','caricature','caricatures','caries','carina','carla','carlin','carnage','carnal','carnap','carnation','carnations','carney','carnival','carnivals','carnivore','carnivores','carolina','caroline','carousal','carousals','carouse','carouser','carousers','carouses','carp','carpal','carpals','carpi','carps','carpus','cars','carsick','carsickness','carson','cartier','cartilage','cartilages','caruso','carver','carvers','carves','cary','casals','case','casein','cases','casey','cash','cashes','cashew','cashews','cashier','cashiers','casino','casinos','casio','cask','casks','caspar','cassias','cassino','cassinos','cassius','cassock','cassocks','caste','castes','castigate','castigates','castigation','castles','casual','casualness','casuals','casualties','cat','cataclysm','cataclysmic','cataclysms','catalan','catalina','catalpa','catalpas','catalysis','catalytic','catarrh','catatonic','catatonics','catawba','catboat','catboats','catch','catcher','catchers','catches','catchier','catchphrase','catchup','catchy','catechise','catechises','catechism','catechisms','cater','caterer','caterers','caters','catfish','catfishes','catharses','catharsis','cathartic','cathartics','cather','catherine','catholic','catholicism','catholicisms','catholics','cathy','catiline','cation','cations','catkin','catkins','catnap','catnaps','catnip','cato','cats','catsup','cattier','cattiness','catwalk','catwalks','caucasus','cauchy','caucus','caucuses','causal','causalities','causation','cause','causes','caustic','caustics','caution','cautionary','cautions','cautious','cautiousness','cavalier','cavaliers','cavern','cavernous','caverns','caves','caviar','cavities','caw','caws','cayuga','cd','ceases','ceausescu','cebu','celibacy','celibate','celibates','celina','cenotaph','cenotaphs','censer','censers','censure','censures','census','censuses','centaurus','centenaries','centenary','centennial','centennials','cephalic','cepheus','ceramic','ceramics','cerberus','cereal','cereals','cerebra','cerebral','ceremonial','ceremonials','ceremonies','ceremonious','ceremony','cerenkov','ceres','cerf','cerise','certain','certainties','certificate','certificates','certification','certifications','certifies','certify','cervical','cervices','cervixes','cesar','cessation','cessations','cession','cessions','cessna','cf','chalice','chalices','chalk','chalkier','chalks','chalky','chamber','chamberlain','chamberlains','chambers','chambray','chamois','champ','champagne','champagnes','champion','champions','championship','championships','champlain','champs','char','charbray','charcoal','charcoals','charge','charges','charier','charioteer','charioteers','charismatic','charismatics','charities','charlatan','charlatans','charolais','charon','chars','chartism','chary','chaser','chasers','chases','chassis','chaste','chasten','chastens','chastise','chastises','chat','chats','chattanooga','chatterboxes','chattier','chattiness','chaucer','chauncey','chauvinism','chauvinistic','che','cheat','cheater','cheaters','cheats','chechen','check','checkbook','checkbooks','checker','checkers','checks','checkup','checkups','cheer','cheerier','cheeriness','cheerios','cheers','cheery','cheeses','cheesier','cheesy','chef','chefs','chekhov','chen','cheney','chengdu','chennai','cheops','cheri','cherish','cherishes','chernenko','cherub','cherubic','cherubs','cheshire','chess','chevalier','chevy','chew','chewer','chewers','chewier','chews','chewy','chi','chiba','chic','chicago','chicana','chicaner
ies','chicanery','chicano','chicer','chichi','chichis','chick','chicks','chiffon','chili','chilies','chilis','chin','china','chinatown','chinese','chink','chinks','chino','chinook','chinooks','chinos','chins','chip','chipmunk','chipmunks','chipper','chippers','chips','chirico','chiropody','chiropractic','chiropractics','chirp','chirps','chirrup','chirrups','chisinau','chitchat','chitchats','chitin','chitlins','chivas','chives','chock','chocks','chocolate','chocolates','choctaw','choice','choicer','choices','choir','choirs','choker','chokers','chokes','choose','chooses','choosey','choosier','choosy','chop','chopin','chopper','choppers','choppier','choppiness','choppy','chopra','chops','chopstick','chopsticks','chore','chores','chorus','choruses','chose','chosen','chou','chow','chows','chubbier','chubbiness','chubby','chuck','chucks','chukchi','chunk','chunkier','chunkiness','chunks','chunky','churn','churns','chute','chutes','chuvash','ci','cicero','cinch','cinches','cinchona','cinchonas','cincinnati','cinder','cinders','cindy','cinnabar','cinnamon','cipher','ciphers','cipro','circa','circe','circles','circulate','circulates','circulation','circulations','circus','circuses','cirrhosis','cirrus','cisco','cistern','cisterns','citation','citations','cite','cites','citibank','cities','civic','civics','civies','civilities','civvies','cl','clack','clacks','clair','claire','clam','clambakes','clamber','clambers','clamp','clampdown','clampdowns','clamps','clams','clan','clancy','clandestine','clank','clanks','clannish','clans','clap','clapper','clappers','claps','clapton','claptrap','clara','clare','clarence','clarendon','clarice','clarification','clarifications','clarifies','clarify','clarion','clarions','clark','clash','clashes','clasp','clasps','class','classes','classic','classical','classicism','classics','classier','classification','classifications','classifies','classify','classiness','classmates','classy','claus','clause','clauses','clausius','clavicles','claw','claws','clay','clayier','cleric','clerical','clerics','clerk','clerks','cliburn','click','clicks','cliff','cliffs','clinch','clincher','clinchers','clinches','cline','clinic','clinical','clinics','clink','clinker','clinkers','clinks','clio','clip','clipper','clippers','clips','clipt','clobber','clobbers','cloche','cloches','clock','clocks','clockwise','clone','clones','clop','clops','close','closeness','closer','closes','closure','closures','cloth','clothes','clothesline','clotheslines','clothespin','clothespins','clothier','clothiers','clotho','cloths','cloudy','clouseau','clover','clovers','cloves','clovis','clown','clownish','clownishness','clowns','cloy','cloys','club','clubhouse','clubhouses','clubs','cluck','clucks','clues','clunk','clunker','clunkers','clunkier','clunks','clunky','clutch','clutches','cm','co','coach','coaches','coagulate','coagulates','coagulation','coal','coalesce','coalescence','coalesces','coalition','coalitions','coals','coarse','coarsen','coarseness','coarsens','coarser','coastline','coastlines','coat','coats','cob','cobain','cobb','cobra','cobras','cobs','cocaine','cocci','coccis','coccus','coccyges','coccyxes','cochin','cochise','cochran','cock','cockatoo','cockatoos','cockier','cockiness','cockney','cockneys','cockroach','cockroaches','cocks','cocksucker','cocksuckers','cocksure','cocky','cocoas','cocoon','cocoons','cocteau','cody','coerce','coerces','coercion','coffees','coffer','coffers','coffey','coffin','coffins','cogency','cogently','cohen','cohere','coherence','coherently','coheres','cohesion','c
oif','coiffure','coiffures','coifs','coin','coinage','coinages','coins','cokes','cola','colander','colanders','colas','colic','colicky','colin','colitis','column','columns','como','con','conakry','conan','concatenate','concatenates','concatenation','concatenations','concavities','conceal','conceals','conceives','concept','conception','conceptions','concepts','conceptual','concern','concerns','concerti','concertina','concertinas','concession','concessionaire','concessionaires','concessions','conch','conches','conchs','concierge','concierges','conciliates','conciliation','concise','conciseness','conciser','conclaves','conclusion','conclusions','concoction','concoctions','concrete','concretes','concubine','concubines','concussion','concussions','condescend','condescends','condescension','condition','conditional','conditionals','conditioner','conditioners','conditions','condo','condoes','condone','condones','condos','conduce','conduces','conduction','cone','cones','confection','confectioner','confectioneries','confectioners','confectionery','confections','confer','conference','conferences','confers','confess','confesses','confession','confessional','confessionals','confessions','confine','confines','confiscate','confiscates','confiscation','confiscations','confound','confounds','confrontation','confrontational','confrontations','confucius','confuse','confuses','confusion','confusions','confute','confutes','conga','congas','congeal','congeals','congenial','congestion','conic','conical','conics','conifer','coniferous','conifers','conk','conks','conn','connection','connections','connectives','conner','connery','conniver','connivers','connives','connotation','connotations','connote','connotes','connubial','cons','conscious','consciousness','consciousnesses','conscript','conscription','conscripts','consecrate','consecrates','consecration','consecrations','consensual','consensus','consensuses','conservation','conservatism','conservatives','conserves','consistencies','consistency','consistently','consolation','consolations','consonance','consonances','conspicuous','conspiracies','conspiracy','conspire','conspires','constance','constancy','constantine','constantly','consternation','constipate','constipates','constipation','consulate','consulates','contain','container','containers','contains','contend','contender','contenders','contends','contention','contentions','contentious','continence','contingencies','contingency','continual','continuation','continuations','continues','continuous','conurbation','conurbations','convalesce','convalescence','convalescences','convalesces','converge','convergence','convergences','converges','conversation','conversational','conversations','converse','converses','conversion','conversions','conviction','convictions','convince','convinces','convivial','convocation','convocations','convokes','convolution','convolutions','convoy','convoys','coo','cook','cookbook','cookbooks','cooker','cookeries','cookers','cookery','cookies','cooks','cooky','coolies','coon','coons','coop','cooper','cooperates','cooperation','cooperatives','coopers','coops','coos','cooties','cop','copacabana','copernican','copernicus','copes','copier','copiers','copies','copious','copland','copper','coppers','coppery','coppice','coppices','coppola','copra','cops','copse','copses','copter','copters','coptic','copula','copulas','copulate','copulates','copulation','copy','copycat','copycats','cora','core','cores','corey','corfu','corn','corncob','corncobs','corner','corners','cornflakes','cornice','cornices','co
rnier','cornish','corns','cornucopias','cornwallis','corny','coruscate','coruscates','cosby','cosier','cosies','cosine','cosmic','cosmopolitan','cosmopolitans','cosmos','cosmoses','cossack','costco','costlier','costliness','costly','cosy','cote','cotes','couch','couches','countenance','countenances','counteraction','counteractions','counterespionage','countess','countesses','counties','countless','coup','couperin','coupes','coupon','coupons','coups','courage','courageous','cousin','cousins','cousteau','cover','covers','covertly','coves','cow','cowboy','cowboys','cower','cowers','cowlick','cowlicks','cowper','cowpokes','cowpuncher','cowpunchers','cows','cowslip','cowslips','coy','coyer','coyness','coyote','coyotes','cr','crab','crabbe','crabbier','crabbiness','crabby','crabs','crack','cracker','crackers','cracklier','cracks','crackup','crackups','craftier','craftiness','crag','crags','cram','cramp','cramps','crams','cranach','crane','cranes','cranial','crank','crankcase','crankcases','crankier','crankiness','cranks','cranky','crannies','cranny','crap','crapes','crappier','crappy','craps','crash','crashes','crass','crasser','crassness','crate','crater','craters','crates','cravat','cravats','craves','craw','crawfish','crawfishes','craws','cray','crayfish','crayfishes','crayola','crayon','crayons','cream','creamer','creameries','creamers','creamery','creamier','creaminess','creams','creamy','creases','creates','creation','creationism','creations','creatives','creature','creatures','crecy','creon','creosote','creosotes','crepes','crept','crescendi','crescendo','crescendos','cress','cretaceous','cretan','crete','cretin','cretinous','cretins','crevasse','crevasses','crevice','crevices','crew','crews','crib','cribbage','cribs','crick','cricks','crier','criers','cries','cringe','cringes','crinklier','crinoline','crinolines','crisco','crises','crisis','crisp','crisper','crispier','crispness','crisps','crispy','crisscross','crisscrosses','cristina','critic','critical','criticism','criticisms','critics','croat','croats','croce','croci','crock','crockery','crocks','crocus','crocuses','croesus','crone','crones','cronies','cronin','cronkite','cronus','crony','crook','crookes','crooks','croon','crooner','crooners','croons','crop','cropper','croppers','crops','crosby','crosier','crosiers','cross','crossbar','crossbars','crossbeam','crossbeams','crossbones','crossbow','crossbows','crosscheck','crosschecks','crosser','crosses','crossfire','crossfires','crossness','crossover','crossovers','crosswalk','crosswalks','crosswise','crotch','crotches','crouch','crouches','croup','croupier','croupiers','croupy','crow','crowbar','crowbars','crown','crowns','crows','crucial','crucifies','crucifixes','crucify','cruise','cruiser','cruisers','cruises','crunch','cruncher','crunches','crunchier','crunchy','crush','crushes','crustier','crutch','crutches','cruxes','cry','crybabies','crybaby','cryogenics','crypt','cryptic','crypts','cs','ctesiphon','cu','cub','cuba','cuban','cubans','cube','cubes','cubic','cubical','cubicles','cubism','cubs','cuchulain','cuckoo','cuckoos','cues','cuff','cuffs','cuisine','cuisines','culinary','cup','cupcakes','cupola','cupolas','cups','curacao','curacies','curacy','curate','curates','curatives','curb','curbs','cure','cures','cushier','cushion','cushions','cushy','cusp','cusps','cuss','cusses','cutback','cutbacks','cute','cuteness','cutesier','cutesy','cuticles','cutlass','cutlasses','cutlery','cuvier','cybernetic','cybernetics','cyberpunk','cyberpunks','cyberspace','cycles','cyclic','cyclical','
cyclone','cyclones','cyclonic','cyclops','cylinder','cylinders','cynic','cynical','cynicism','cynics','cynosure','cynosures','cypher','cypress','cypresses','cyprus','cyrano','cyrus','cystic','db','dyer','dyers','dyes','dykes','dylan','dynamic','dynamical','dynamics','dynamism','dynamite','dynamites','dynamo','dynamos','dynastic','dynasties','dysfunction','dysfunctional','dysfunctions','dyson','er','eras','eraser','erasers','erases','erasure','erasures','erato','eric','erica','erich','erick','erickson','ericson','ericsson','erik','erin','eris','erna','ernestine','erogenous','eros','eroses','erosion','erotic','erotica','eroticism','errand','errands','errata','erratas','erratic','erse','erupt','eruption','eruptions','erupts','ervin','erwin','es','esau','escalate','escalates','escalation','escalations','escapes','escapism','escher','eschew','eschews','escrow','escrows','escutcheon','escutcheons','eskimo','eskimos','esophagi','esophagus','esophaguses','espies','espionage','espousal','espouse','espouses','espresso','espressos','espy','essen','essence','essences','essene','essential','essentials','establish','establishes','estate','estates','esteban','estela','estes','esther','eu','eucalypti','eucalyptus','eucalyptuses','eucharistic','eugene','eugenics','eugenio','eula','eunice','eunuch','eunuchs','euphony','euphrates','eutectic','f','faces','facial','facials','facilitate','facilitates','facilitation','facilities','faction','factional','factionalism','factions','factitious','fag','fagin','fags','falcon','falconer','falconers','falcons','falkland','falklands','fallacies','fallacious','fallacy','false','falseness','falser','falsification','falsifications','falsifies','falsify','falsities','falstaff','familial','familiar','familiars','families','famine','famines','famish','famishes','famous','far','farce','farces','farcical','fares','farina','farinaceous','farsi','farther','fascinate','fascinates','fascination','fascinations','fascism','fashion','fashions','fassbinder','fasten','fastener','fasteners','fastens','fat','fatal','fatalism','fatalistic','fatalities','fates','father','fatherland','fatherlands','fathers','fatness','fats','fatten','fattens','fattier','fatties','fatuous','fatuousness','faun','fauna','faunas','fauns','fauntleroy','faustino','fe','fear','fears','feat','feather','featherier','feathers','feathery','feats','feature','features','feb','februaries','february','fecal','feces','fecund','fees','feistier','felice','felicities','feline','felines','femora','fen','fence','fencer','fencers','fences','fend','fender','fenders','fends','fens','fer','feral','ferber','fern','fernando','ferns','ferocious','ferociousness','festival','festivals','festivities','feta','fetch','fetches','fetich','fetiches','fetish','fetishes','fetishism','fetishistic','fetlock','fetlocks','fever','feverish','fevers','few','fewer','fey','fiasco','fiascoes','fiascos','fiat','fiats','fib','fibber','fibbers','fiber','fibers','fibonacci','fibrous','fibs','fibula','fibulas','fiche','fiches','fichte','fiction','fictional','fictions','fictitious','fierce','fierceness','fiercer','fierier','fieriness','fiery','fiesta','fiestas','fife','fifes','fifth','fifths','fifties','filial','filipino','filipinos','fin','final','finales','finals','finance','finances','financial','financier','financiers','finch','finches','find','finder','finders','finds','fine','fineness','finer','finery','fines','finesse','finesses','finickier','finicky','finis','finises','finish','finisher','finishers','finishes','finite','fink','finks','finland','finn','fin
negan','finnier','finnish','finns','finny','fins','fiona','fir','fire','firebrand','firebrands','firecracker','firecrackers','fireflies','firehouse','firehouses','fireplace','fireplaces','firepower','fireproof','fireproofs','fires','firewater','firmness','firs','firstborn','firstborns','firstly','firth','firths','fiscal','fiscals','fischer','fish','fisher','fisheries','fishers','fishery','fishes','fishhook','fishhooks','fishier','fishwife','fishwives','fishy','fisk','fission','fissure','fissures','fisticuffs','fitch','fitly','fiver','fives','fixes','fla','flab','flabbier','flabbiness','flabby','flack','flacks','flair','flairs','flak','flakes','flakier','flakiness','flaky','flan','flanagan','flanders','flange','flanges','flank','flanks','flap','flapper','flappers','flaps','flare','flares','flash','flashback','flashbacks','flasher','flashers','flashes','flashgun','flashguns','flashier','flashiness','flashy','flask','flasks','flatboat','flatboats','flatcar','flatcars','flatly','flaw','flaws','flaxen','flay','flays','flick','flicker','flickers','flicks','flier','fliers','flies','flinch','flinches','flintier','flintlock','flintlocks','flip','flippancy','flippantly','flipper','flippers','flips','flu','flub','flubs','flues','fluff','fluffier','fluffiness','fluffs','fluffy','flukes','flukier','fluky','flunk','flunkies','flunks','flunky','fluoresce','fluorescence','fluoresces','flush','flusher','flushes','flute','flutes','fluxes','fm','foal','foals','foam','foamier','foams','foamy','fob','fobs','focal','foch','foci','focus','focuses','focusses','foes','fogey','fogeys','fokker','folio','folios','fond','fonder','fondness','fondu','fondues','fondus','foolish','foolishness','footage','footlocker','footlockers','footloose','fop','foppish','fops','fora','forage','forages','foray','forays','forbes','forbore','fore','forebear','forebears','forecastles','foreclose','forecloses','foreclosure','foreclosures','forefather','forefathers','foregather','foregathers','forenames','forenoon','forenoons','forensic','forensics','foreplay','forerunner','forerunners','fores','foresees','foreskin','foreskins','forestation','foreswore','foresworn','foretaste','foretastes','forever','forevermore','forewarn','forewarns','fornicate','fornicates','fornication','fosse','found','foundation','foundations','founder','founders','founds','fountain','fountains','foxes','foyer','foyers','fr','fracas','fracases','fraction','fractional','fractions','fractious','fragrance','fragrances','fragrantly','framer','framers','frames','fran','franc','france','frances','francesca','franchise','franchisees','franchiser','franchisers','franchises','francine','francis','francisca','franciscan','francisco','franck','franco','francois','francoise','francs','frank','franker','frankincense','franklin','frankness','franks','franny','frantic','frappes','fraser','frat','fraternal','fraternities','frats','fray','frays','freer','frees','french','frenches','frenetic','freon','fresco','frescoes','frescos','fresh','freshen','freshens','fresher','freshness','freshwater','fresno','frey','fri','friar','friars','fricassees','friction','frier','friers','fries','frigate','frigates','fringe','fringes','fripperies','frippery','frisco','frisk','friskier','friskiness','frisks','frisky','frivolities','fro','frobisher','frock','frocks','frolic','frolics','frond','fronds','frontage','frontages','frontenac','frontier','frontiers','frostbite','frostbites','frostier','frostiness','froth','frothier','froths','frothy','frown','frowns','frowsier','frowsy','fructifies','fructify','f
ruitier','fruition','fruitless','fruitlessness','fry','fryer','fryers','fuchs','fuchsias','fuck','fucker','fuckers','fucks','fulani','fun','funafuti','function','functional','functionaries','functionary','functions','fund','funds','fundy','funeral','funerals','funereal','funk','funkier','funks','funky','funner','funnier','funnies','funniness','funny','furbish','furbishes','furnaces','furnish','furnishes','fuse','fuselage','fuselages','fuses','fushun','fusion','fusions','fuss','fusses','fussier','fussiness','fussy','fustier','ga','gab','gabbier','gabby','gabon','gabs','gaff','gaffe','gaffes','gaffs','gage','gages','gain','gaines','gains','gala','galactic','galapagos','galas','galibi','gamow','gander','ganders','gandhi','ganges','gap','gapes','gaps','garage','garages','garb','garbo','garbs','gareth','garner','garners','garnish','garnishees','garnishes','gas','gascony','gaseous','gases','gash','gashes','gasoline','gasp','gasps','gasser','gasses','gassier','gassy','gate','gatecrasher','gatecrashers','gates','gather','gatherer','gatherers','gathers','gauche','gaucher','gaucho','gauchos','gaudy','gauge','gauges','gauss','gautier','gavin','gawk','gawkier','gawkiness','gawks','gawky','gay','gayer','gayness','gays','gd','ge','gear','gearboxes','gears','gecko','geckoes','geckos','gees','geffen','gehenna','geishas','gelatin','gelatine','gelatinous','gen','gena','genaro','gender','genders','gene','genera','general','generalissimo','generalissimos','generalities','generals','generate','generates','generation','generations','generic','generics','generosities','generous','genes','geneses','genesis','genetic','genetics','genial','genies','genii','genitives','genius','geniuses','genoas','genre','genres','gentler','gentles','gently','genuine','genuineness','genus','genuses','geo','geoffrey','geophysical','geophysics','geopolitical','geopolitics','geostationary','gerber','gere','gerund','gerunds','gestapo','gestapos','gestate','gestates','gestation','gesticulate','gesticulates','gesticulation','gesticulations','gewgaw','gewgaws','geyser','geysers','h','hack','hacker','hackers','hackney','hackneys','hacks','hag','hagar','haggai','hags','hal','halcyon','haler','hales','half','halfback','halfbacks','hallucinate','hallucinates','hallucination','hallucinations','hallucinogen','hallucinogenic','hallucinogenics','hallucinogens','halo','haloes','halogen','halogens','halon','halos','hals','halsey','halves','ham','hamitic','hamlin','hammock','hammocks','hammond','hamper','hampers','hampshire','hampton','hams','hamsun','harass','harasses','harbin','hardback','hardbacks','hardy','hares','hark','harks','harlan','harmon','harmonic','harmonica','harmonicas','harmonics','harmonies','harmonious','harmoniousness','harmony','harness','harnesses','harp','harper','harpies','harpoon','harpoons','harps','harpy','harsh','harsher','harshness','harte','hartline','has','hasbro','hash','hasheesh','hashes','hashish','hasp','hasps','hassock','hassocks','haste','hasten','hastens','hastes','hastier','hastiness','hat','hatch','hatchback','hatchbacks','hatcheries','hatchery','hatches','hater','haters','hates','hath','hats','hatteras','haunch','haunches','he','heal','healer','healers','heals','health','healthier','healthiness','healthy','hear','hearer','hearers','hears','hearse','hearses','heartache','heartaches','heartburn','hearten','heartens','hearth','hearths','heartier','hearties','heartiness','heartless','heartlessness','heat','heater','heaters','heath','heathen','heathenish','heathens','heather','heaths','heats','hebe','hebraic','hebrew
','hebrews','hecate','heck','hectare','hectares','hectic','hecuba','hefner','heftier','hegemony','heifer','heifers','heine','heinous','heinousness','heir','heiress','heiresses','heirs','helical','helices','helicon','helicopter','helicopters','heliopolis','helios','helixes','hemophiliac','hemophiliacs','hen','hence','hench','henderson','henna','hennas','hennessy','hens','henson','hep','hepatic','hepatitis','hepburn','hepper','heptagon','heptagons','her','hera','herb','herbaceous','herbage','herbal','herbivore','herbivores','herbs','here','hereby','herein','hereof','herero','heresies','heresy','heretic','heretical','heretics','hereupon','herewith','heritage','heritages','hernias','hero','heroes','heroic','heroics','heroin','heroine','heroins','heroism','heron','herons','heros','herpes','herrera','hers','hersey','hershey','hes','hesitancy','hesitantly','hesitate','hesitates','hesitation','hesitations','hesperus','hess','hesse','hew','hewer','hewers','hewn','hews','hexes','hey','hf','hg','hi','hiatus','hiatuses','hibachi','hibachis','hibernate','hibernates','hibernation','hibiscus','hibiscuses','hiccup','hiccups','hick','hickok','hicks','hierarchical','hierarchies','hierarchy','hies','hifalutin','hiker','hikers','hikes','hind','hinder','hinders','hindi','hindrance','hindrances','hinds','hindu','hinduism','hinduisms','hindus','hindustan','hindustani','hines','hinge','hinges','hip','hipparchus','hipper','hippies','hippo','hippocrates','hippocratic','hippos','hippy','hips','hiram','hire','hires','hirsute','his','hispanic','hispanics','hispaniola','hiss','hisses','hitachi','hitch','hitchcock','hitches','hitchhiker','hitchhikers','hitchhikes','hither','hitler','hitlers','hives','ho','hoagies','hoagy','hoarier','hoariness','hoarse','hoarseness','hoarser','hoary','hob','hobbes','hobbies','hobbs','hobby','hobnob','hobnobs','hobo','hoboes','hobos','hobs','hock','hockney','hocks','hockshop','hockshops','hoes','hoff','hogan','hogans','hohenstaufen','hohokam','hokier','holier','holiness','holistic','homogeneous','homophobic','homophone','homophones','hon','honcho','honchos','honduran','hondurans','honduras','hone','honecker','hones','honestly','honey','honeybees','honeymoon','honeymooner','honeymooners','honeymoons','honeys','honk','honks','honolulu','honshu','hooch','hoof','hoofs','hook','hooker','hookers','hooks','hookup','hookups','hooky','hooligan','hooliganism','hooligans','hoop','hooper','hoopla','hoops','hoorah','hoorahs','hooray','hoorays','hoosier','hootch','hoover','hoovers','hooves','hop','hopes','hopi','hopkins','hopper','hoppers','hops','hopscotch','hopscotches','horace','horacio','horatio','horn','hornier','hornpipes','horns','horny','horus','hos','hose','hoses','hosiery','hospice','hospices','hostage','hostages','hostelries','hostelry','hostess','hostesses','hostilities','hostler','hostlers','hotelier','hoteliers','hothouse','hothouses','hotly','hound','hounds','house','houseboat','houseboats','housebound','housecoat','housecoats','houseflies','househusband','househusbands','housemother','housemothers','houses','housewares','housewife','housewives','hover','hovers','how','howdy','hows','hsbc','hub','hubbies','hubbub','hubbubs','hubby','hubcap','hubcaps','huber','hubris','hubs','huck','huerta','hues','huff','huffier','huffs','huffy','huge','hugeness','huh','hui','hula','hulas','hun','hunch','hunchback','hunchbacks','hunches','hunk','hunker','hunkers','hunks','huns','hus','husband','husbands','hush','hushes','husk','husker','huskers','huskier','huskies','huskiness','husks','husky','hussar','hu
ssars','hussein','hussies','hussite','hussy','hustler','hustlers','hustles','hutch','hutches','hutchinson','huygens','hyacinth','hyacinths','hymn','hymnal','hymnals','hymns','hyper','hyperbola','hyperbolas','hyperbolic','hypercritical','hyperion','hypersensitivities','hyperspace','hypertension','hypes','hyphen','hyphenate','hyphenates','hyphenation','hyphenations','hyphens','hypnoses','hypnosis','hypnotic','hypnotics','hypnotism','hypo','hypocrisies','hypocrisy','hypocrite','hypocrites','hypocritical','hypos','hypotenuse','hypotenuses','hypothalami','hypothalamus','hypotheses','hypothesis','hysteresis','i','iaccoca','iago','iamb','iambic','iambics','iambs','ibexes','ibices','ibis','ibises','iblis','ibo','ibsen','ibuprofen','icahn','icarus','ice','icebound','iceboxes','icecap','icecaps','iceland','icelander','icelanders','icelandic','ices','icicles','icier','iciness','ickier','icky','icon','iconoclastic','icons','icy','if','iffier','iffy','ifs','ikhnaton','ikon','ikons','ila','imogene','in','ina','inabilities','inaccuracies','inaccuracy','inaccurate','inaction','inamorata','inamoratas','inane','inaner','inanities','inapt','inarticulate','inasmuch','inattention','inauspicious','inborn','inbound','inc','inca','incandescence','incantation','incantations','incapacitate','incapacitates','incarcerate','incarcerates','incarceration','incarcerations','incarnate','incarnates','incarnation','incarnations','incas','incautious','incendiaries','incendiary','incense','incenses','incentives','inception','inceptions','inch','inches','inchon','incinerate','incinerates','incineration','incise','incises','incision','incisions','incite','incites','incivilities','inclination','inclinations','incline','inclines','inclose','incloses','inclosure','inclosures','inclusion','inclusions','incoherence','incoherently','inconsistencies','inconsistency','inconsistently','inconspicuous','inconspicuousness','inconstancy','incontinence','increases','incrustation','incrustations','incubate','incubates','incubation','incubi','incubus','incubuses','ind','indicate','indicates','indication','indications','indicatives','indices','indies','indifference','indifferently','indigence','indigenous','indigestion','indira','indirection','indirectly','indiscretion','indiscretions','indisposition','indispositions','indistinctly','indochina','indochinese','indore','indra','induce','induces','inductance','inductees','induction','inductions','indues','indus','indy','inebriates','inebriation','inelastic','inept','ineptly','ineptness','inertial','inertly','ines','inessential','inessentials','infamies','infamous','infamy','infarction','infatuates','infatuation','infatuations','infection','infections','infectious','infectiousness','infelicities','infer','inference','inferences','inferential','infernal','inferno','infernos','infers','infestation','infestations','infinite','infinities','infinitives','inflate','inflates','inflation','inflationary','infliction','influxes','info','infraction','infractions','infringe','infringes','infuse','infuses','infusion','infusions','inge','ingenious','ingenuous','ingenuousness','ingestion','inhalation','inhalations','inhaler','inhalers','inhales','inhere','inherently','inheres','inheritance','inheritances','inhibition','inhibitions','initial','initials','initiates','initiation','initiations','initiatives','ink','inkier','inkiness','inks','inky','inland','inlay','inlays','inline','inn','innate','inner','innocence','innocently','innocuous','innovates','innovation','innovations','inns','inoculate','inoculates','inocul
ation','inoculations','inonu','ins','inscribe','inscribes','inscription','inscriptions','inseam','inseams','insectivore','insectivores','insecure','insertion','insertions','inshore','insincere','insinuates','insinuation','insinuations','insistence','insistently','insofar','insomniac','insomniacs','inspiration','inspirational','inspirations','inspire','inspires','instance','instances','instantaneous','instantly','instep','insteps','instigate','instigates','instigation','insubstantial','insulate','insulates','insulation','insulin','insurance','insurances','insure','insures','intakes','intend','intends','intense','intenser','intensification','intensifier','intensifiers','intensifies','intensify','intensities','intensives','intention','intentional','intentions','intently','interaction','interactions','interfaces','interferes','interferon','intern','internal','internals','international','internationalism','internationals','internes','interneship','interneships','interns','internship','internships','intestate','intestinal','intestine','intestines','inundates','inundation','inundations','inure','inures','invar','invasion','invasions','inverse','inverses','inversion','inversions','invertebrate','invertebrates','investigate','investigates','investigation','investigations','inviolate','invitation','invitational','invitationals','invitations','invite','invites','invocation','invocations','invoice','invoices','invokes','io','ion','ionesco','ionic','ionics','ionosphere','ionospheres','ions','iota','iotas','iowas','iphone','ir','ira','iran','irate','irateness','ire','ireland','irene','iris','irises','irish','irisher','irk','irks','iron','ironic','ironical','ironies','irons','irony','irrational','irrationals','irresolute','irresolution','irruption','irruptions','irtish','irvin','irwin','is','isis','island','islander','islanders','islands','ism','isms','isobar','isobars','isolate','isolates','isolation','isolationism','issac','issachar','issues','itaipu','italic','italics','itasca','itch','itches','itchier','itchiness','itchy','iterate','iterates','iteration','iterations','itineraries','itinerary','ives','ivies','ivy','iyar','k','kalashnikov','kalb','kali','kampala','karachi','karat','karats','kari','karin','karina','karla','karo','karyn','kasparov','katharine','katherine','katheryn','kathy','katina','katowice','katy','kaunas','kc','keratin','keri','kern','kerosene','kerosine','kerouac','kfc','kharkov','khoikhoi','khorana','khrushchev','khufu','khyber','kibosh','kick','kickback','kickbacks','kicker','kickers','kickier','kickoff','kickoffs','kicks','kickstand','kickstands','kicky','kigali','kikuyu','kilroy','kimono','kimonos','kin','kind','kinder','kindlier','kindliness','kindness','kindnesses','kinds','kinetic','kink','kinkier','kinks','kinky','kinney','kinsey','kinship','kiosk','kiosks','kip','kipper','kippers','kirby','kirchhoff','kirchner','kiribati','kirk','kirkland','kirov','kirsten','kishinev','kiss','kisser','kissers','kisses','kitakyushu','kitchen','kitchener','kitchens','kite','kites','kith','kiwi','kiwis','klan','klaus','klimt','kline','knack','knacker','knacks','knapp','knapsack','knapsacks','knavery','knaves','knavish','knees','knew','knickerbocker','knickers','knickknack','knickknacks','knife','knifes','knives','knob','knobbier','knobby','knobs','knock','knocker','knockers','knocks','knopf','knossos','know','known','knows','knuth','koalas','kobe','koch','koestler','kook','kookier','kookiness','kooks','kooky','koran','korans','kosciusko','kosher','koshers','kossuth','kr','krakow','kramer','kras
noyarsk','krebs','kresge','kris','krishna','krista','kristen','kristi','kristin','kristina','kristine','kroc','krone','kronecker','kroner','krupp','krypton','ks','kublai','kubrick','kuhn','kuibyshev','kusch','kw','kyushu','la','lab','laban','labial','labials','labs','lace','lacerate','lacerates','laceration','lacerations','laces','lacey','lachesis','lacier','lack','lacks','laconic','lacrosse','lactate','lactates','lactation','lactic','lacuna','lacunas','lacy','lady','ladyship','lahore','lain','lair','lairs','laius','lakes','lakota','lana','lanai','lancashire','lance','lancer','lancers','lances','land','lander','landlady','landlubber','landlubbers','landon','landowner','landowners','lands','landsat','landscaper','landscapers','landscapes','landsteiner','lane','lanes','lank','lanker','lankier','lankiness','lanky','lanny','lanolin','lantern','lanterns','lao','laocoon','laos','lap','laplace','lapland','lapp','lapps','laps','lapse','lapses','laptop','laptops','lara','lasagna','lasagnas','lasagne','lasagnes','lascivious','lasciviousness','laser','lasers','lash','lashes','lass','lassen','lasses','lassies','lasso','lassoes','lassos','lastly','latch','latches','late','latency','lateness','lateran','lath','lather','lathers','lathes','laths','latin','latina','latiner','latino','latinos','latins','launch','launcher','launchers','launches','launder','launderer','launderers','launders','laundress','laundresses','laura','laureates','lauren','laurence','laval','lavern','laverne','lavish','lavisher','lavishes','lavishness','lavoisier','lavonne','law','lawn','lawns','lawrence','laws','lawson','lawyer','lawyers','lay','layamon','layer','layers','layla','layoff','layoffs','layover','layovers','layperson','laypersons','lays','li','liar','liars','lib','libation','libations','libby','liberace','liberal','liberalism','liberals','liberate','liberates','liberation','liberties','libertine','libertines','libra','libraries','library','libras','lice','licence','licences','license','licensees','licenses','licentiates','licentious','licentiousness','lichees','lichen','lichens','lichtenstein','lick','licks','lies','lieu','lieutenancy','life','lifeboat','lifeboats','lifeline','lifelines','lifer','lifers','lifespan','lifespans','liker','likes','lila','lilac','lilacs','lilies','lilith','limn','limns','limo','limoges','limos','limousin','limousine','limousines','lin','lina','linage','linchpin','linchpins','lind','lindsey','lindy','line','lineages','lineal','linear','linebacker','linebackers','linen','linens','liner','liners','lines','lineup','lineups','link','linkages','linker','links','linkup','linkups','linnaeus','linus','lion','lioness','lionesses','lions','lip','liposuction','lippi','lips','lipstick','lipsticks','lipton','lira','liras','lire','lisbon','lisp','lisps','listen','listener','listeners','listens','listless','listlessness','litanies','litany','litchi','litchis','lite','literacy','literate','literates','literati','lither','lithosphere','lithospheres','litigate','litigates','litigation','litmus','liver','liveries','livers','livery','lives','livy','lr','lu','luau','luaus','lubavitcher','lubber','lubbers','lubbock','lube','lubes','lubricate','lubricates','lubrication','lucas','luce','lucifer','lucio','lucite','lucius','luck','luckier','luckiness','lucknow','lucks','lucky','lucre','lucretius','lucy','luis','lula','lulu','luna','lunacies','lunacy','lunar','lunatic','lunatics','lunch','luncheon','luncheons','lunches','lunge','lunges','lupin','lupine','lupines','lupins','lupus','lure','lures','luscious','lusciousness','l
ush','lusher','lushes','lushness','lustier','lustiness','lute','lutes','luther','luvs','md','mg','mn','mo','moat','moats','mob','mobs','moccasin','moccasins','mock','mocker','mockeries','mockers','mockery','mocks','mohacs','mohican','mohicans','moho','moira','moire','moires','moises','moisten','moistens','moistly','molasses','molina','moluccas','mon','mona','monaco','monarch','monarchic','monarchical','monarchies','monarchism','monarchs','monarchy','monastic','monasticism','monastics','monera','money','moneybag','moneybags','monica','monicker','monickers','monies','moniker','monikers','monk','monks','monmouth','mono','monocles','monogamous','monolith','monolithic','monoliths','monongahela','monophonic','monopolies','monopolistic','mons','monsoon','monsoons','montage','montages','montana','montanan','montanans','monte','month','monthlies','months','moo','mooch','moocher','moochers','mooches','moody','moon','moonbeam','moonbeams','mooney','moons','moonscapes','moonshine','moonshines','moore','moos','moose','mop','mopes','mops','moraine','moraines','moralistic','moralities','moran','morass','morasses','moray','morays','more','moreno','moreover','mores','morn','morns','moscow','moses','mosey','moseys','moss','mosses','mossier','mossy','mostly','mote','motes','moth','mother','motherfucker','motherfuckers','motherland','motherlands','motherliness','mothers','moths','motif','motifs','motion','motions','motivates','motivation','motivational','motivations','motives','motlier','mound','mounds','mountain','mountaineer','mountaineers','mountainous','mountains','mountbatten','mountebank','mountebanks','mounties','mourn','mourner','mourners','mourns','mouse','mouser','mousers','mouses','mousey','mousier','mousiness','mousse','mousses','moustache','moustaches','mousy','mouth','mouths','mouthwash','mouthwashes','mover','movers','moves','movies','mow','mower','mowers','mown','mows','mt','n','na','nab','nabisco','nabob','nabobs','nabokov','nabs','nacho','nachos','nacre','nag','nags','nagy','nair','nairobi','naismith','naiver','nam','namath','names','nan','nanak','nancy','nannies','nanny','nanook','nanosecond','nanoseconds','nansen','nantes','nap','napes','naphtali','napier','napkin','napkins','nappier','nappies','nappy','naps','narc','narcissi','narcissism','narcissistic','narcissus','narcissuses','narcosis','narcotic','narcotics','narcs','nark','narks','narrate','narrates','narration','narrations','narratives','narwhal','narwhals','nary','nasal','nasals','nascar','nash','nassau','nasser','nastier','nastiness','nat','natal','nate','nation','national','nationalism','nationalistic','nationalities','nationals','nations','natives','nativities','nattier','naturalism','naturalistic','nature','natures','nauru','nauseates','nauseous','nautical','nautili','nautilus','nautiluses','naval','navarre','naves','navies','navigate','navigates','navigation','navigational','navy','nay','nays','nb','nd','ne','neal','near','nearby','nearer','nearness','nears','neat','neater','neath','neatness','nebula','nebulas','necessaries','necessary','necessitate','necessitates','necessities','neck','necklace','necklaces','neckline','necklines','necks','neckties','necrosis','nefarious','nefariousness','nefertiti','negate','negates','negation','negations','negatives','negev','nehru','neither','neoclassic','neoclassical','neoclassicism','neogene','neolithic','neon','neonatal','neonate','neonates','neophyte','neophytes','neoprene','nepal','nepali','nephew','nephews','nepotism','neptune','nerdy','nerf','nero','nerves','nervier','nervous','nervou
sness','nervy','nescafe','nestles','nether','netherlander','netherlanders','netherlands','nev','never','nevermore','nevis','nevsky','new','newark','newbies','newborn','newborns','newer','newfoundland','newfoundlands','newness','news','newsboy','newsboys','newsflash','newsier','newspaper','newspapers','newsstand','newsstands','newsy','ni','niacin','nib','nibs','nice','nicene','niceness','nicer','niceties','niche','niches','nichiren','nicholas','nick','nicklaus','nicknack','nicknacks','nicknames','nickolas','nicks','nicobar','nicola','nicolas','nicotine','niftier','nihilism','nihilistic','nikita','nikki','nikolai','nikon','nina','nine','ninepin','ninepins','nines','nineties','ninnies','ninny','nintendo','ninth','ninths','niobe','nip','nipper','nippers','nippier','nippon','nippy','nips','nisei','nita','nite','nites','nixes','no','nobody','noes','noh','noise','noises','noisier','noisiness','noisy','nola','nolan','non','nona','nonalcoholic','nonce','noncooperation','nondescript','none','nonentities','nonessential','nonesuch','nonesuches','nonfat','nonfatal','nonfiction','nonphysical','nonplus','nonpluses','nonplusses','nonpoisonous','nonpolitical','nonprescription','nonprofessional','nonprofessionals','nonproliferation','nonrepresentational','nonseasonal','nonsense','nonsensical','nonsmoker','nonsmokers','nonstick','nontechnical','nonunion','nonuser','nonusers','nonverbal','nonwhite','nonwhites','nook','nooks','noon','noose','nooses','nora','nose','nosegay','nosegays','noses','nosey','nosferatu','nosh','noshes','nosier','nosiness','nosy','notation','notations','notch','notches','note','notebook','notebooks','notepaper','notes','notice','notices','notification','notifications','notifies','notify','notion','notional','notions','noun','nouns','nous','nov','novartis','novas','novice','novices','novitiates','novocain','novocaine','novosibirsk','now','nowhere','nowise','noyce','noyes','np','nth','nu','nub','nubs','nucleus','nucleuses','nukes','nun','nuncio','nuncios','nunki','nunneries','nunnery','nuns','nuptial','nuptials','nutcracker','nutcrackers','nuthatch','nuthatches','o','oar','oars','oases','oasis','oat','oates','oath','oaths','oats','ob','obelisk','obelisks','oberlin','oberon','obese','obey','obeys','obfuscate','obfuscates','obfuscation','oblate','oblation','oblations','obligate','obligates','obligation','obligations','oblige','obliges','obliterate','obliterates','obliteration','oblivion','oblivious','obliviousness','oboes','obscene','obscener','obscenities','obscure','obscures','observation','observational','observations','observer','observers','observes','obsess','obsesses','obsession','obsessions','obsessives','obstacles','obstinacy','obstinate','obtain','obtains','obverse','obverses','obviates','obvious','obviousness','ocarina','ocarinas','occam','occasion','occasional','occasions','occlusion','occlusions','occupancy','occupation','occupational','occupations','occupies','occupy','ocher','ochre','octane','octaves','octavio','odysseus','odyssey','odysseys','of','off','offal','offbeat','offbeats','offenbach','offend','offender','offenders','offends','offense','offenses','offensives','offer','offers','office','officer','officers','offices','official','officials','officiates','officious','officiousness','offs','offshore','offstage','offstages','often','oftener','oh','ohio','oho','ohs','oilier','oiliness','oink','oinks','oise','ok','okla','okra','okras','oks','ola','olaf','olav','olin','oliver','olives','olivier','omnibus','omnibuses','omnibusses','omnipotence','omnipresence','omnivore','omnivo
res','on','onassis','once','one','oneal','onega','oneness','onerous','ones','onion','onions','onionskin','online','ono','onrush','onrushes','onsager','onshore','onus','onuses','onyxes','oops','opal','opalescence','opals','operas','operates','operatic','operation','operational','operations','operatives','ophiuchus','opiates','opine','opines','opinion','opinions','oppose','opposes','opposite','opposites','opposition','oppress','oppresses','oppression','opprobrious','oprah','opt','optic','optical','optics','option','optional','options','opts','opus','opuses','ora','oracles','oran','orange','oranges','orate','orates','oration','orations','orb','orbison','orbs','ore','oregano','oreo','ores','orestes','ornerier','ornery','os','osborn','oscar','oscars','osceola','oshkosh','osier','osiers','osiris','osmosis','osmotic','osprey','ospreys','ossification','ossifies','ossify','ostentation','ostentatious','osteopath','osteopaths','osteopathy','other','others','otherwise','otiose','otis','ouch','ounce','ounces','outage','outages','outback','outbacks','outbound','outclass','outclasses','outhouse','outhouses','outline','outlines','outlives','outlook','outlooks','oval','ovals','ovaries','ovary','ovation','ovations','over','overages','overambitious','overbalance','overbalances','overbear','overbears','overbite','overbites','overbook','overbooks','overbore','overcautious','overcharge','overcharges','overcoat','overcoats','overcook','overcooks','overgenerous','overhear','overhears','overheat','overheats','overlain','overland','overlap','overlaps','overlay','overlays','overlies','overpass','overpasses','overpay','overpays','overplay','overplays','overpopulate','overpopulates','overpopulation','overpower','overpowers','overprice','overprices','overran','overrate','overrates','overreach','overreaches','overreaction','overreactions','overrun','overruns','overs','overseas','overseer','overseers','oversees','overshoes','overstate','overstates','overstay','overstays','overstep','oversteps','oversupplies','overtakes','overtaxes','overthrew','overtly','overuse','overuses','oviparous','ovulate','ovulates','ovulation','ow','owes','owlish','own','owner','owners','ownership','owns','oxen','p','pa','paar','pace','paces','pacheco','pacific','pacification','pacifier','pacifiers','pacifies','pacifism','pacify','pacino','pack','packages','packer','packers','packs','pagan','paganini','paganism','pagans','page','pager','pagers','pages','paginate','paginates','pagination','pahlavi','paige','pain','paine','pains','paintbrush','paintbrushes','pair','pairs','pairwise','pakistan','pakistani','pakistanis','pal','palace','palaces','palatal','palatals','palate','palates','palatial','palau','palaver','palavers','paler','palermo','pales','palestine','palikir','palimony','palliates','palliation','palliatives','palpate','palpates','palpation','palpitate','palpitates','palpitation','palpitations','pals','palsies','palsy','pam','pamirs','pampas','pamper','pampers','pan','panaceas','panache','panamas','panasonic','pancakes','pancreas','pancreases','pancreatic','pandas','pander','panderer','panderers','panders','pandora','pane','panes','panic','panickier','panicky','panics','panier','paniers','pannier','panniers','panoplies','pans','pansies','pansy','panther','panthers','panties','pap','papa','papacies','papacy','papal','papas','papaw','papaws','papayas','paper','paperback','paperbacks','paperboy','paperboys','papers','papery','papoose','papooses','paps','papyrus','papyruses','par','parabola','parabolas','parabolic','parachute','parachutes','para
ffin','paragon','paragons','paragraph','paragraphs','parallaxes','paralyses','paralysis','paralytic','paralytics','paramaribo','paraphrase','paraphrases','paraprofessional','paraprofessionals','parasite','parasites','parasitic','parch','parcheesi','parches','pare','parentage','parentheses','parenthesis','pares','paris','parish','parishes','parishioner','parishioners','park','parkas','parker','parkinson','parks','parlance','parlay','parlays','parnassus','parochial','parochialism','parody','pars','parse','parsec','parsecs','parser','parses','parsi','parsifal','parsimonious','parsimony','parsnip','parsnips','parson','parsonage','parsonages','parsons','partaker','partakers','partakes','partial','partials','participate','participates','participation','participial','particles','particulate','particulates','parties','partition','partitions','partly','pas','pascal','paschal','pashas','pass','passages','passbook','passbooks','passer','passerby','passersby','passes','passion','passionate','passions','passives','passover','passovers','pasta','pastas','paste','pastern','pasterns','pastes','pastiche','pastiches','pastier','pasties','pat','patch','patches','patchier','patchiness','patchy','pate','patently','paternal','paternalism','paternalistic','paterson','pates','path','pathetic','pathogen','pathogenic','pathogens','pathos','paths','patina','patinas','patine','patio','patios','patna','patois','pats','patsies','patsy','pattern','patterns','patti','patties','paula','pauli','pauline','paunch','paunches','paunchier','paunchy','pauper','pauperism','paupers','pause','pauses','paves','pavilion','pavilions','paw','pawn','pawnbroker','pawnbrokers','pawns','pawnshop','pawnshops','pawpaw','pawpaws','paws','pay','paycheck','paychecks','payer','payers','payne','payoff','payoffs','paypal','pays','pb','pd','per','perambulate','perambulates','percales','perceives','percentage','percentages','perception','perceptions','perceptual','perch','percheron','perches','percival','percolate','percolates','percolation','percussion','percy','perfection','perfectionism','perfections','perfectly','perfidy','perforate','perforates','perforation','perforations','pericles','perigees','perihelion','perihelions','peripatetic','peripatetics','peripheral','peripherals','peripheries','periphery','periphrases','periphrasis','periscopes','perish','perishes','perk','perkier','perkiness','perkins','perks','perky','pernicious','peron','peroration','perorations','persecute','persecutes','persecution','persecutions','persephone','persepolis','perseus','perseveres','persiflage','persistence','persistently','person','persona','personage','personages','personal','personalities','personals','personification','personifications','personifies','personify','persons','perspicacious','perspicuous','perspiration','perspire','perspires','persuasion','persuasions','pertain','pertains','perth','pertinacious','pertinence','pertly','peru','perusal','perusals','peruse','peruses','perverse','perverseness','perversion','perversions','peskier','pesky','peso','pesos','pestles','ph','phalli','phallic','phallus','phalluses','pharisees','pharyngeal','pharynges','pharynxes','phases','pheromone','pheromones','phial','phials','philander','philanderer','philanderers','philanders','philatelic','philip','philippic','philippics','philippine','philippines','philips','philistine','philistines','phipps','phish','phisher','phishers','phobias','phobic','phobics','phobos','phone','phones','phonetic','phonetics','phoney','phoneys','phonic','phonics','phonier','phonies','phoniness','
phony','phosphates','phosphorescence','phosphorus','phrasal','phrase','phrases','phyla','physic','physical','physicals','physics','pi','pica','picasso','picayune','pick','picker','pickers','pickier','picks','pickup','pickups','pickwick','picky','picnic','picnicker','picnickers','picnics','pier','pierce','pierces','pierre','piers','pies','pigeon','pigeons','piker','pikers','pikes','pilaf','pilaff','pilaffs','pilafs','pilate','pilates','pilau','pilaus','pilaw','pilaws','pin','pinafore','pinafores','pinatubo','pincer','pincers','pinch','pinches','pincus','pincushion','pincushions','pindar','pine','pines','pinfeather','pinfeathers','pinion','pinions','pink','pinker','pinkies','pinkish','pinks','pinky','pinnacles','pinnate','pinocchio','pinprick','pinpricks','pins','pinup','pinups','pioneer','pioneers','pious','pip','piper','pipers','pipes','pippin','pippins','pips','piracy','piraeus','piranhas','pirate','pirates','piratical','pis','pisces','piss','pissaro','pisses','pistachio','pistachios','pita','pitch','pitcher','pitchers','pitches','piteous','pith','pithier','pithy','pities','pius','pkwy','placate','placates','placation','place','placebo','placebos','placenta','placentas','placer','placers','places','plaice','plain','plainclothes','plainer','plainness','plains','plaintiff','plaintiffs','plan','planar','planck','plane','planes','plank','planks','planner','planners','plans','plantain','plantains','plantation','plantations','plastic','plasticine','plastics','plate','plateau','plateaus','platen','platens','plates','plath','play','playback','playbacks','playboy','playboys','player','players','playhouse','playhouses','playoff','playoffs','plays','playstation','pliers','plies','plinth','plinths','pliny','pliocene','pluck','pluckier','pluckiness','plucks','plucky','plunder','plunderer','plunderers','plunders','plunge','plunges','plunk','plunks','pluralism','pluralistic','pluralities','plus','pluses','plush','plusher','plushier','plushy','plusses','pm','po','poach','poacher','poachers','poaches','pocahontas','pock','pocks','pocono','poconos','poesy','poi','pointier','pointless','pointlessness','poise','poises','poison','poisoner','poisoners','poisonous','poisons','poisson','poitier','poker','pokers','pokes','pokier','poky','poland','polanski','police','polices','policies','policy','polio','polios','polish','polisher','polishers','polishes','polite','politeness','politesse','politic','political','politico','politicoes','politicos','politics','polities','pomona','ponce','poncho','ponchos','pond','ponder','ponderous','ponders','ponds','pone','pones','ponies','pontiac','pontiff','pontiffs','pontifical','pontificate','pontificates','pony','pooch','pooches','pooh','poohs','poona','poop','poops','poorhouse','poorhouses','pop','popcorn','popes','poplin','popover','popovers','poppa','poppas','popper','poppies','poppins','poppy','poppycock','pops','populace','populaces','populate','populates','population','populations','populism','pore','pores','porfirio','porn','porno','pose','poser','posers','poses','posh','posher','posies','position','positional','positions','positives','positivism','posse','posses','possess','possesses','possession','possessions','possessives','possibilities','postage','posthaste','posthumous','postmark','postmarks','posy','potash','potency','potentate','potentates','potential','potentialities','potentials','pothook','pothooks','potion','potions','potluck','potlucks','pouch','pouches','pounce','pounces','pound','pounds','poussin','power','powerboat','powerboats','powerhouse','powerhouses',
'powers','powwow','powwows','poxes','practical','practicalities','practicals','practice','practices','practise','practises','practitioner','practitioners','prairies','praise','praises','praline','pralines','pram','prance','prancer','prancers','prances','prank','pranks','prate','prates','prattles','prawn','prawns','pray','prayer','prayers','prays','preach','preacher','preachers','preaches','preachier','preachy','prearrange','prearranges','precarious','precaution','precautionary','precautions','precept','precepts','precious','preciousness','precipice','precipices','precipitate','precipitates','precipitation','precipitations','precise','preciseness','preciser','precises','precision','preclusion','precocious','precociousness','preconceives','preconception','preconceptions','precondition','preconditions','prefaces','prefer','preference','preferences','preferential','prefers','prefixes','preheat','preheats','prelate','prelates','premonition','premonitions','prenatal','prentice','preoccupation','preoccupations','preoccupies','preoccupy','prep','prepackages','preparation','preparations','prepare','prepares','prepay','prepays','preponderates','preposition','prepositional','prepositions','prepossess','prepossesses','preppier','preppies','preppy','preps','prerogatives','pres','presages','prescribe','prescribes','prescription','prescriptions','presence','presences','presentation','presentations','presently','preservation','preservatives','preserver','preservers','preserves','preshrank','preshrunk','press','presses','pressure','pressures','prestige','presuppose','presupposes','presupposition','presuppositions','pretence','pretences','pretend','pretender','pretenders','pretends','pretense','pretenses','pretension','pretensions','pretentious','pretentiousness','prevaricate','prevaricates','prevarication','prevarications','previous','prevues','prewar','prey','preys','priam','price','prices','pricey','pricier','prick','pricklier','pricks','pricy','pries','priestess','priestesses','priestlier','priestly','primness','prince','princelier','princes','princess','princesses','principal','principalities','principals','prioress','prioresses','prism','prismatic','prisms','prison','prisoner','prisoners','prisons','prissier','prissiness','prissy','pristine','prius','privacy','privater','privates','privation','privations','privier','privies','privy','pro','probabilistic','probabilities','probate','probates','probation','probationary','probationer','probationers','probe','probes','proboscis','proboscises','process','processes','procession','processional','processionals','processions','proclamation','proclamations','proclivities','procrastinate','procrastinates','procrastination','procreates','procreation','procrustes','procure','procures','procyon','prof','profess','professes','profession','professional','professionalism','professionals','professions','proffer','proffers','profiteer','profiteers','profligacy','profligate','profligates','profound','profounder','profs','profundities','profuse','profusion','profusions','progeny','prohibition','prohibitions','proliferate','proliferates','proliferation','prolific','promo','promos','promote','promotes','promotion','promotional','promotions','prone','proneness','pronoun','pronounce','pronounces','pronouns','pronunciation','pronunciations','proof','proofs','prop','propagate','propagates','propagation','propane','proper','properer','properties','prophecies','prophecy','prophesies','prophesy','prophetess','prophetesses','prophetic','prophylactic','prophylactics','propitiates','pr
opitiation','propitious','proposal','proposals','propose','proposer','proposes','proposition','propositional','propositions','propound','propounds','props','prorate','prorates','pros','proscribe','proscribes','proscription','proscriptions','prose','prosecute','prosecutes','prosecution','prosecutions','proserpine','prosier','prosody','prosper','prosperous','prospers','prostate','prostates','prostheses','prosthesis','prosy','protection','protections','protein','proteins','protestantism','protestantisms','protestation','protestations','proteus','proverb','proverbial','proverbs','proves','province','provinces','provincial','provincialism','provincials','provision','provisional','provisions','proviso','provisoes','provisos','provo','provocation','provocations','provokes','prow','prowess','prows','prune','prunes','pry','psych','psyche','psyches','psychic','psychical','psychics','psycho','psychogenic','psychokinesis','psychopath','psychopathic','psychopaths','psychos','psychoses','psychosis','psychotic','psychotics','psychs','pt','ptah','pu','pub','pubescence','pubic','public','publican','publicans','publication','publications','publicly','publish','publisher','publishers','publishes','pubs','puccini','puck','pucker','puckers','puckish','pucks','puff','puffer','puffier','puffin','puffiness','puffins','puffs','puffy','pukes','pulaski','pun','punch','punches','punchier','punchline','punchy','punctilious','pungency','pungently','punic','punier','punish','punishes','punk','punker','punks','puns','puny','pup','pupa','pupal','pupas','puppies','puppy','pups','purana','purblind','pure','purees','pureness','purus','pus','pusey','push','pusher','pushers','pushes','pushier','pushiness','pushkin','pushover','pushovers','pushup','pushups','pushy','puss','pusses','pussier','pussies','pussy','pussycat','pussycats','putin','pynchon','pyre','pyrenees','pyres','pyrexes','pythagoras','pythias','python','pythons','pyxes','ra','rabat','rabbi','rabbinate','rabbinical','rabbis','rabelais','rabies','rabin','raccoon','raccoons','race','racer','racers','races','racial','racier','racine','raciness','racism','rack','racks','racoon','racoons','racy','raffish','raga','ragas','rage','rages','rain','rainbow','rainbows','raincoat','raincoats','rainier','rains','rainwater','rainy','raise','raises','raisin','raisins','rakes','rakish','rakishness','ramon','ramona','ramos','ran','ranch','rancher','ranchers','ranches','rand','randal','randi','randier','randomness','randy','range','ranges','rank','ranker','rankin','rankine','rankness','ranks','ransack','ransacks','rap','rapacious','rapaciousness','rapes','rapier','rapiers','rapine','rapper','rappers','raps','rapscallion','rapscallions','rapt','rapture','raptures','rare','rarefies','rarefy','rareness','rares','rasalgethi','rascal','rascals','rash','rasher','rashers','rashes','rashness','rasmussen','rasp','raspier','rasps','rasputin','raspy','rastaban','rate','rates','rather','ratification','ratifies','ratify','ratio','ration','rational','rationales','rationalism','rationalistic','rationals','rations','ratios','ratliff','raucous','raucousness','raunchier','raunchiness','raunchy','ravages','raves','ravine','ravines','ravioli','raviolis','ravish','ravishes','raw','rawalpindi','rawer','rawness','ray','rayban','rayburn','raymond','rayon','rays','rb','re','reach','reaches','reaction','reactionaries','reactionary','reactions','reactivates','reactivation','real','realer','reales','realism','realistic','realities','reals','ream','reamer','reamers','reams','rear','rearrange','rearranges','rears',
'reason','reasoner','reasons','reassess','reassesses','reassurance','reassurances','reassure','reassures','reba','rebate','rebates','rebecca','rebind','rebinds','rebirth','rebirths','reborn','rebound','rebounds','rebuff','rebuffs','rebukes','rebus','rebuses','recantation','recantations','recap','recaps','recapture','recaptures','receipt','receipts','receiver','receivers','receivership','receives','recently','receptacles','reception','receptions','recess','recesses','recession','recessional','recessionals','recessions','recessives','recharge','recharges','recheck','rechecks','recife','recipes','reciprocal','reciprocals','reciprocate','reciprocates','reciprocation','recitation','recitations','recitatives','recite','recites','reckon','reckons','reclamation','reclassifies','reclassify','recline','recliner','recliners','reclines','recluse','recluses','reconciliation','reconciliations','recondite','recondition','reconditions','recopies','recopy','recoup','recoups','recover','recoveries','recovers','recovery','recreates','recreation','recreational','recreations','recta','rectification','rectifications','rectifier','rectifiers','rectifies','rectify','rectilinear','recuperates','recuperation','recycles','reestablish','reestablishes','ref','refashion','refashions','refer','referees','reference','references','referential','refers','refinance','refinances','refine','refiner','refineries','refiners','refinery','refines','refinish','refinishes','refocus','refocuses','refocusses','reforestation','refraction','refrain','refrains','refresh','refresher','refreshers','refreshes','refrigerate','refrigerates','refrigeration','refs','refuge','refugees','refuges','refund','refunds','refurbish','refurbishes','refurnish','refurnishes','refusal','refusals','refuse','refuses','refutation','refutations','refute','refutes','regain','regains','regencies','regency','regenerate','regenerates','regeneration','rehash','rehashes','rehearsal','rehearsals','rehearse','rehearses','reheat','reheats','rehire','rehires','reich','rein','reincarnate','reincarnates','reincarnation','reincarnations','reins','reinstate','reinstates','reis','reissues','reiterate','reiterates','reiteration','reiterations','relapse','relapses','relate','relates','relation','relational','relations','relationship','relationships','relatives','relativistic','relaxes','relay','relays','relic','relics','relies','relish','relishes','relives','reluctance','reluctantly','remote','remoteness','remotes','removal','removals','remover','removers','removes','rena','renal','renames','renascence','renascences','rend','render','renders','rendition','renditions','rends','rene','renege','reneges','renew','renewal','renewals','renews','reno','renoir','renounce','renounces','renovates','renovation','renovations','renown','renunciation','renunciations','reoccupies','reoccupy','rep','repackages','repair','repairs','reparation','reparations','repay','repays','repercussion','repercussions','rephrase','rephrases','replace','replaces','replay','replays','replica','replicas','replicate','replicates','replication','replications','replies','repose','reposes','repossess','repossesses','repossession','repossessions','reprehend','reprehends','representation','representational','representations','representatives','repress','represses','repression','repressions','reprisal','reprisals','reprise','reprises','reproach','reproaches','reprobate','reprobates','reprocess','reprocesses','reproof','reproofs','reproves','reps','republic','republican','republicanism','republicans','republics','repub
lish','republishes','reputation','reputations','repute','reputes','reran','rerun','reruns','resales','rescind','rescinds','rescission','rescuer','rescuers','rescues','research','researcher','researchers','researches','resend','reservation','reservations','reserves','reservoir','reservoirs','resin','resinous','resins','resistance','resistances','resolute','resoluteness','resolution','resolutions','resonance','resonances','resonantly','resonate','resonates','resound','resounds','respiration','respire','respires','respite','respites','respond','responds','response','responses','responsibilities','restate','restates','restless','restlessness','resupplies','resurfaces','resuscitate','resuscitates','resuscitation','retain','retainer','retainers','retains','retakes','retaliates','retaliation','retaliations','retch','retches','retention','rethink','rethinks','reticence','retina','retinal','retinas','retinues','retire','retirees','retires','reuben','reunification','reunifies','reunify','reunion','reunions','reunite','reunites','reuse','reuses','reuther','rev','revaluation','revaluations','revalues','revamp','revamps','reverberate','reverberates','reverberation','reverberations','reveres','reveries','reversal','reversals','reverse','reverses','reversion','revery','revise','revises','revision','revisions','revival','revivals','revives','revivification','revivifies','revivify','revocation','revocations','revokes','revolution','revolutionaries','revolutionary','revolutions','revs','revues','rewind','rewinds','rewire','rewires','rewound','reyes','reyna','rh','rhine','rhineland','rhino','rhinoceri','rhinoceros','rhinoceroses','rhinos','rho','rhone','rhubarb','rhubarbs','rn','ru','rub','rubaiyat','rubber','rubberier','rubberneck','rubbernecks','rubbers','rubbery','rubbish','rubbishes','rubbishy','rube','ruben','rubens','rubes','rubicon','rubicund','rubier','rubies','rubik','rubin','rubinstein','rubric','rubrics','rubs','ruby','ruchbah','rucksack','rucksacks','ruckus','ruckuses','rudy','rues','ruff','ruffs','rufus','ruin','ruination','ruinous','ruins','run','runaround','runarounds','rundown','rundowns','rune','runes','runner','runners','runnier','runny','runoff','runoffs','runs','runyon','rupture','ruptures','ruse','ruses','rush','rushes','rushmore','rusk','ruskin','rusks','russ','russo','rustic','rustics','rustier','rustiness','rustler','rustlers','rustles','rutabaga','rutabagas','rutan','ruth','s','sac','saccharin','saccharine','sacco','sachs','sack','sackcloth','sacks','sacs','sag','sagacious','sagas','sager','sages','saginaw','sago','sags','saguaro','saguaros','sal','salacious','salaciousness','salami','salamis','salaries','salary','salas','salerno','sales','salesclerk','salesclerks','salesperson','salespersons','salinas','saline','salines','salish','salivary','salivates','salivation','salk','sallies','salmon','salmons','salon','salons','saloon','saloons','salsas','saltier','saltine','saltines','saltiness','salubrious','salutation','salutations','salute','salutes','salvages','salvation','salvatore','salver','salvers','salves','salvo','salvoes','salvos','sam','samar','samaritan','samaritans','samba','sambas','sames','samovar','samovars','sampan','sampans','sampson','samson','samsonite','samurai','sarasota','saratov','sarcasm','sarcasms','sarcastic','sarcophagi','sarcophagus','sarcophaguses','sargasso','sari','saris','sarnoff','sars','sash','sashes','saskatoon','sass','sasses','sassier','sassoon','sassy','sat','sates','satiates','satin','satiny','satire','satires','satirical','satisfaction','satisfactions
','satisfies','satisfy','satrap','satraps','saturate','saturates','saturation','saturn','saturnine','sauce','saucepan','saucepans','saucer','saucers','sauces','saucier','sauciness','saucy','sauna','saunas','saunders','saundra','sausages','saussure','sb','sc','scab','scabbier','scabby','scabies','scabrous','scabs','scalar','scalars','scalawag','scalawags','scales','scalier','scallion','scallions','scalp','scalper','scalpers','scalps','scaly','scam','scamp','scamper','scampers','scampi','scampies','scamps','scams','scan','scandal','scandalous','scandals','scanner','scanners','scans','scansion','scantier','scanties','scantiness','scapula','scapulas','scar','scarab','scarabs','scaramouch','scarce','scarceness','scarcer','scare','scarecrow','scarecrows','scares','scarf','scarfs','scarier','scarifies','scarify','scars','scarves','scary','scat','scats','scatterbrain','scatterbrains','scenario','scenarios','scene','scenery','scenes','scenic','scepter','scepters','scheat','schenectady','schick','schism','schismatic','schismatics','schisms','schnapps','scholastic','schooner','schooners','schtick','schticks','schuss','schusses','schwas','schwinn','sciatic','sciatica','scion','scions','scipio','sclerosis','sclerotic','scoff','scofflaw','scofflaws','scoffs','scoliosis','sconce','sconces','scone','scones','scoop','scoops','scopes','score','scores','scorn','scorns','scotch','scotches','scotchs','scow','scows','scram','scrams','scrap','scrapbook','scrapbooks','scraper','scrapers','scrapes','scrappier','scrappy','scraps','scratch','scratches','scratchier','scratchiness','scratchy','scrawnier','scrawny','scream','screams','screw','screwier','screws','screwy','scribe','scribes','scribner','scrip','scrips','script','scripts','scripture','scriptures','scrofula','scrooge','scrooges','scrota','scrounge','scrounges','scrub','scrubber','scrubbers','scrubbier','scrubby','scrubs','scruff','scruffier','scruffs','scruffy','scrunch','scrunches','scrutiny','scuba','scubas','scuff','scuffs','scupper','scuppers','scurf','scurfier','scurfy','scythes','se','seal','sealer','sealers','seals','sealskin','seam','seamier','seams','seamy','sear','search','searcher','searchers','searches','sears','seas','seascapes','seashore','seashores','seasick','seasickness','season','seasonal','seasons','seat','seats','sebaceous','sec','secession','seclusion','seconal','second','secondaries','secondary','seconds','secrecy','secrete','secretes','secretion','secretions','secretly','secs','section','sectional','sectionalism','sectionals','sections','secure','secures','seer','seers','seersucker','sees','sega','seiko','seine','seismic','sen','senate','senates','send','sender','senders','sends','seneca','senecas','senna','sennacherib','sensation','sensational','sensationalism','sensations','sense','senses','sensibilities','sensitives','sensitivities','sensual','sensuous','sensuousness','sentence','sentences','sententious','sepal','sepals','separate','separates','separation','separations','separatism','sepoy','sepsis','sept','septa','septic','sera','seraglio','seraglios','serapes','seraph','seraphic','seraphs','serb','serbs','sere','serena','serene','sereneness','serener','serengeti','serer','serf','serfs','serge','sergei','serial','serials','series','serious','seriousness','sermon','sermons','serous','serra','serrano','server','servers','serves','service','services','servo','servos','sesames','session','sessions','setback','setbacks','seth','seuss','sever','several','severer','severn','severs','severus','sew','sewer','sewers','sewn','sews','sexes','s
h','shack','shacks','shag','shags','sham','shames','shampoo','shampoos','shams','shares','shari','sharif','shark','sharks','sharkskin','sharon','sharp','sharper','sharpers','sharpness','sharps','shasta','shat','shaula','shaun','shauna','she','shear','shearer','shearers','shears','sheath','sheathe','sheathes','sheaths','sheba','shebeli','sheer','sheerer','sheers','sheik','sheikh','sheikhs','sheiks','sheila','shenanigan','shenanigans','sheratan','sheraton','sheri','sheriff','sheriffs','sherpa','shes','shevat','shies','shiftier','shiftiness','shiftless','shiftlessness','shikoku','shin','shinbone','shinbones','shine','shiner','shiners','shines','shinier','shininess','shinnies','shinny','shins','shiny','ship','shipmates','shipper','shippers','ships','shipwreck','shipwrecks','shire','shires','shirk','shirker','shirkers','shirks','shiver','shivers','shivery','shoal','shoals','shock','shocker','shockers','shockproof','shocks','shoes','shoeshine','shoeshines','shone','shoo','shook','shoon','shoos','shop','shopper','shoppers','shops','shoptalk','shopworn','shore','shoreline','shorelines','shores','shorn','shoshone','shostakovitch','shoves','show','showboat','showboats','showcase','showcases','shower','showers','showery','showier','showiness','shown','showoff','showoffs','showplace','showplaces','shows','showy','shrank','shrek','shrew','shrewish','shrews','shrub','shrubberies','shrubbery','shrubbier','shrubby','shrubs','shrunk','shtick','shticks','shtik','shtiks','shuck','shucks','shuckses','shula','shun','shuns','shush','shushes','shy','shyer','shyness','si','siam','sibelius','sic','sick','sicker','sicklier','sickness','sicknesses','sicks','sics','sierpinski','sierra','sierras','siesta','siestas','sikh','sikhism','sikhs','silage','silas','silica','silicate','silicates','siliceous','silicious','silicon','silicone','silicosis','simon','simone','sin','sinai','sinatra','since','sincere','sincerer','sinclair','sindhi','sine','sinecure','sinecures','sinew','sinews','sinewy','singapore','singe','singes','sink','sinker','sinkers','sinks','sinner','sinners','sins','sinuous','sinus','sinuses','sinusitis','sip','siphon','siphons','sips','sir','sire','siren','sirens','sires','sirius','sirocco','siroccos','sirs','sirup','sirups','sis','sisal','sises','sissier','sissies','sissy','sistine','sisyphus','site','sites','sixes','skater','skaters','skates','ski','skier','skiers','skies','skiff','skiffs','skin','skinner','skinnier','skinniness','skinny','skins','skip','skipper','skippers','skippy','skips','skis','skivvies','skivvy','skunk','skunks','sky','skycap','skycaps','skylab','skyline','skylines','skyscraper','skyscrapers','slab','slabs','slack','slacker','slackers','slackness','slacks','slain','slakes','slander','slanderer','slanderers','slanderous','slanders','slap','slapdash','slaps','slapstick','slash','slashes','slate','slates','slather','slathers','slav','slaver','slavers','slavery','slaves','slavic','slavish','slavonic','slavs','slaw','slay','slayer','slayers','slays','slice','slicer','slicers','slices','slick','slicker','slickers','slickness','slicks','slier','slimness','slink','slinkier','slinks','slinky','slip','slipcover','slipcovers','slippage','slippages','slipper','slipperier','slipperiness','slippers','slippery','slips','slither','slithers','slithery','sliver','slivers','slues','sluice','sluices','slunk','slush','slushier','slushy','sm','smack','smacker','smackers','smacks','smallish','smarten','smartens','smartly','smash','smashes','smirch','smirches','smirk','smirks','smirnoff','smite','smites','smi
th','smithies','smiths','smithson','smithy','smock','smocks','smoker','smokers','smokes','smokestack','smokestacks','smokier','smokiness','smoky','smooch','smooches','smooth','smoother','smoothes','smoothness','smooths','smote','smother','smothers','sn','snack','snacks','snafu','snafus','snag','snags','snakes','snakier','snaky','snap','snapper','snappers','snappier','snappish','snappy','snaps','snare','snares','snatch','snatches','sneer','sneers','snicker','snickers','sniff','sniffs','snip','sniper','snipers','snipes','snippier','snippy','snips','snitch','snitches','snob','snobbery','snobbier','snobbish','snobbishness','snobby','snobs','snooker','snoop','snooper','snoopers','snoopier','snoops','snoopy','snootier','snootiness','snore','snores','snow','snowbound','snowflakes','snowier','snows','snowshoes','snowy','snub','snubs','snuck','snuff','snuffboxes','snuffer','snuffers','snuffs','so','soar','soars','sob','sober','soberer','soberness','sobers','sobs','soccer','social','socialism','socialistic','socialite','socialites','socials','sociopath','sociopaths','sock','socks','socrates','socratic','sofas','soften','softener','softeners','softens','softies','softly','soho','solace','solaces','soli','solicitation','solicitations','solis','solitaire','solitaires','solution','solutions','somnambulism','son','sonar','sonars','sonata','sonatas','sondra','sonic','sonnies','sonny','sons','sony','soon','sooner','sooth','soothes','sootier','sop','sophism','sophisticate','sophisticates','sophistication','sophocles','sophomore','sophomores','soppier','soppy','soprano','sopranos','sops','sopwith','sorbonne','sore','soreness','sores','sos','sound','sounder','soundness','soundproof','soundproofs','sounds','soup','soupier','soups','soupy','sourness','souse','souses','south','southampton','southbound','southerlies','southern','southerner','southerners','southerns','southpaw','southpaws','souths','southwestern','sow','sower','sowers','sown','sows','soy','spa','space','spaces','spaceship','spaceships','spacewalk','spacewalks','spacey','spacial','spacier','spacious','spaciousness','spacy','spahn','spain','spam','span','spanish','spank','spanks','spanner','spanners','spans','spar','spare','spareness','sparer','spareribs','spares','spark','sparks','spars','sparse','sparseness','sparser','sparta','spartacus','spartan','spartans','spas','spasm','spasms','spastic','spastics','spat','spate','spates','spatial','spats','spatula','spatulas','spawn','spawns','spay','spays','sphere','spheres','spherical','sphinges','sphinxes','spica','spice','spices','spicier','spiciness','spicy','spies','spiffier','spiffy','spikes','spikier','spiky','spin','spinach','spinal','spinals','spindlier','spine','spines','spinier','spinnaker','spinnakers','spinner','spinners','spinoff','spinoffs','spins','spiny','spiral','spirals','spire','spireas','spires','spiritless','spiro','spite','spites','splash','splashes','splashier','splashy','splay','splays','splice','splicer','splicers','splices','spline','splines','spock','spoilage','spokes','spokesperson','spokespersons','spoliation','sponge','sponges','spontaneous','spoof','spoofs','spook','spookier','spooks','spooky','spoon','spoonerism','spoonerisms','spoons','spore','spores','spotless','spotlessness','spouse','spouses','sprain','sprains','sprat','sprats','spray','sprayer','sprayers','sprays','sprees','sprier','sprite','sprites','spruce','sprucer','spruces','spry','spryer','spryness','spumone','spumoni','spun','spunk','spunkier','spunky','spurn','spurns','spy','sr','srinagar','stab','stabs','staccat
i','staccato','staccatos','stacey','staci','stack','stacks','stacy','staff','staffer','staffers','staffs','stage','stagecoach','stagecoaches','stages','stain','stains','stair','staircase','staircases','stairs','stakes','stalactite','stalactites','stalin','stan','stance','stances','stanch','stancher','stanches','stanchion','stanchions','stand','standby','standbys','standish','standoff','standoffish','standoffs','stands','stanislavsky','stank','staph','starbucks','stare','stares','starfish','starfishes','stash','stashes','state','statehouse','statehouses','statelier','stateliness','staten','states','static','station','stationary','stationer','stationers','stationery','stations','statistic','statistical','statistics','staubach','staunch','stauncher','staunches','staves','stay','stays','steal','steals','stealth','stealthier','stealthy','steam','steamboat','steamboats','steamer','steamers','steamier','steams','steamship','steamships','steamy','steer','steers','stein','steinbeck','steiner','steins','stench','stenches','stendhal','step','stepbrother','stepbrothers','stepfather','stepfathers','stephen','stephens','stephenson','stepmother','stepmothers','steppes','steps','stepson','stepsons','stereo','stereophonic','stereos','stereoscopes','stern','sterner','sternness','sterno','sterns','stethoscopes','steuben','stew','stews','stick','sticker','stickers','stickier','stickies','stickiness','stickpin','stickpins','sticks','stickup','stickups','sticky','sties','stiff','stiffen','stiffener','stiffeners','stiffens','stiffer','stiffness','stiffs','stine','stink','stinker','stinkers','stinks','stipulate','stipulates','stipulation','stipulations','stitch','stitches','sub','subaru','subclass','subconscious','subcutaneous','suborn','subornation','suborns','subroutine','subroutines','subs','subscribe','subscriber','subscribers','subscribes','subscript','subscription','subscriptions','subscripts','subsection','subsections','subsidy','subsistence','subsonic','subspace','substance','substances','substantial','substantiates','substantiation','substantiations','substantives','substation','substations','subterfuge','subterfuges','subtitles','subtler','subtly','suburb','suburbs','subversion','subversives','success','successes','succession','successions','succinctly','succotash','such','suck','sucker','suckers','sucks','sucre','sucrose','suction','suctions','sues','suffer','sufferance','sufferer','sufferers','suffers','suffice','suffices','suffixes','suffocate','suffocates','suffocation','suffragan','suffragans','suffrage','suffuse','suffuses','suffusion','sufi','sufism','sui','suitcases','suite','suites','sukarno','sukkoth','sukkoths','sulawesi','sumner','sumo','sumter','sun','sunbathe','sunbather','sunbathers','sunbathes','sunbeam','sunbeams','sunburn','sunburns','sundas','sunder','sunders','sundial','sundials','sundown','sundowns','sunfish','sunfishes','sunk','sunni','sunnier','sunny','suns','sunshine','suntan','suntans','sunup','sup','super','superb','superber','supercharge','supercharges','supercilious','superficial','superfluous','superfund','superintend','superintends','superlatives','supernovas','superpower','superpowers','supers','superscript','superscripts','supersonic','superstition','superstitions','superstitious','supertanker','supertankers','supervise','supervises','supervision','supervisions','supine','supper','suppers','supplicate','supplicates','supplication','supplications','supplier','suppliers','supplies','suppose','supposes','supposition','suppositions','suppress','suppresses','suppression','suppu
rate','suppurates','suppuration','supranational','sups','sure','surefire','sureness','sureties','surf','surfaces','surfer','surfers','surfs','surnames','sushi','suspicion','suspicions','suspicious','sustain','sustains','sustenance','sutherland','swag','swags','swam','swami','swamis','swamp','swampier','swamps','swampy','swarthier','swarthy','swash','swashes','swastikas','swat','swatch','swatches','swath','swathe','swathes','swaths','swats','swerves','swiftly','swine','swines','swinish','swipes','swirlier','swish','swisher','swishes','swiss','swisses','switch','switchback','switchbacks','switcher','switches','swoon','swoons','swoop','swoops','swop','swops','swore','sworn','sybarite','sybarites','sybaritic','sycamore','sycamores','sykes','synapse','synapses','sync','synch','synches','synchs','syncopate','syncopates','syncopation','syncs','syndicate','syndicates','syndication','synge','synonymous','synopses','synopsis','syntactic','syntactical','syntheses','synthesis','syphilis','syphilitic','syphilitics','syphon','syphons','syracuse','syrup','syrups','syrupy','ta','tab','tabasco','tabbies','tabby','tabernacles','taboo','taboos','tabs','tabu','tabulate','tabulates','tabulation','tabus','tacitly','tack','tackier','tackiness','tacks','tacky','taco','tacos','tactic','tactical','tactics','tactless','tactlessness','taffeta','taffies','taffy','tahiti','taine','taker','takers','takes','taliban','taliesin','tan','tanager','tanagers','taney','tangential','tank','tanker','tankers','tanks','tanner','tanneries','tanners','tannery','tannin','tans','tansy','tantalus','tao','taoism','taoisms','tap','taper','tapers','tapes','tapioca','tapir','tapirs','taps','tara','tarantino','tare','tares','tarnish','tarnishes','task','tasks','tass','taste','tastes','tastier','tastiness','tate','taurus','tauruses','tautly','tavern','taverns','tawney','tawnier','tawny','taxes','tb','tbilisi','tc','teach','teacher','teachers','teaches','teacup','teacups','teal','teals','team','teams','tear','teargas','teargases','teargasses','tearier','tears','teary','teas','teaser','teasers','teases','teaspoon','teaspoons','teat','teats','technical','technicalities','technocracy','technocrat','technocrats','techs','tees','teheran','tehran','ten','tenacious','tenancies','tenancy','tend','tender','tenderer','tenderness','tenders','tendinitis','tendon','tendonitis','tendons','tends','tennis','tennyson','tenon','tenons','tenpin','tenpins','tens','tense','tenseness','tenser','tenses','tension','tensions','tentacles','tenth','tenths','tenuous','tenuousness','tenure','tenures','terabyte','terabytes','terence','tern','terns','tesla','tess','testate','testates','testes','testicles','testier','testifies','testify','testimonial','testimonials','testimonies','testimony','testiness','testis','tetanus','tether','tethers','tethys','th','thalami','thalamus','thales','thames','thar','tharp','that','thatch','thatcher','thatches','thereupon','thermodynamic','thermodynamics','thermoplastic','thermoplastics','thermos','thermoses','thermostatic','theron','thesaurus','thesauruses','theses','theseus','thesis','thespis','thessaly','thiamin','thiamine','thick','thicker','thickness','thicknesses','thieu','thin','thine','think','thinker','thinkers','thinks','thinner','thinners','thinness','thins','thirstier','thirties','this','thistles','thither','tho','thoraces','thoracic','thoraxes','thoreau','thorn','thornier','thorns','thorny','those','thoth','thou','thous','thrace','thrash','thrasher','thrashers','thrashes','threat','threats','threes','threescore','threescores','th
renody','thresh','thresher','threshers','threshes','threw','thru','thrush','thrushes','thunder','thunderclap','thunderclaps','thunderous','thunders','thundershower','thundershowers','thurber','thus','thutmose','thwack','thwacks','thy','ti','tiaras','tiber','tiberius','tibetan','tibetans','tibias','tic','tick','ticker','tickers','ticklish','ticks','ticonderoga','tics','tidy','tier','tiers','ties','tiff','tiffs','tikes','timon','timothy','tin','tina','tinder','tinderboxes','tine','tines','tinge','tinges','tinier','tinker','tinkers','tinnier','tinny','tins','tinsmith','tinsmiths','tintinnabulation','tintinnabulations','tiny','tip','tipi','tipis','tipper','tipperary','tippers','tips','tipsier','tipsy','tiptoes','tiptop','tiptops','tirana','tire','tires','tiresias','tissues','titan','titanic','titans','tithes','titicaca','titles','titmice','titmouse','tl','tlaloc','tm','u','ubs','ucayali','ucla','uh','ukraine','ululate','ululates','unanimous','unawares','unbar','unbars','unbeknown','unbend','unbends','unbind','unbinds','unborn','unbound','uncannier','uncanny','unceremonious','uncertain','uncertainties','unclasp','unclasps','uncles','unclothes','unconcern','unconditional','unconscious','unconsciousness','uncouth','uncover','uncovers','uncritical','unction','unctions','under','underbrush','undercharge','undercharges','underclass','underclothes','undercoat','undercoats','undercover','underlain','underlay','underlays','underlies','underline','underlines','underneath','underneaths','underpass','underpasses','underpay','underpays','underpin','underpins','underplay','underplays','underrate','underrates','underscore','underscores','understand','understands','understate','understates','undertaker','undertakers','undertakes','undervalues','underwater','undies','undo','undoes','undone','undress','undresses','undulate','undulates','undulation','undulations','unearth','unearths','uneasier','uneasiness','uneasy','unemotional','unenthusiastic','unethical','unfamiliar','unfasten','unfastens','unfrock','unfrocks','unfunny','ungainlier','ungainliness','unhealthier','unhealthy','unhinge','unhinges','unhitch','unhitches','unholier','unhook','unhooks','unicameral','unicorn','unicorns','unicycles','unification','unifies','unify','unintentional','union','unions','uniroyal','unison','unitas','unite','unites','unities','universal','universals','universe','universes','universities','unkind','unkinder','unkindlier','unkindness','unknown','unknowns','unlace','unlaces','unlatch','unlatches','unluckier','unlucky','unnecessary','unnerves','unofficial','unpack','unpacks','unpick','unpin','unpins','unpretentious','unprofessional','unreal','unrealistic','unregenerate','unrulier','unruliness','unscrew','unscrews','unseal','unseals','unseat','unseats','unsheathe','unsheathes','unsnap','unsnaps','unsound','unsounder','unsubscribe','unsubscribes','unsubstantial','unsure','untidy','unties','unusual','unwarier','unwariness','unwary','unwind','unwinds','unwise','unwiser','unwound','unwrap','unwraps','up','upbeat','upbeats','upchuck','upchucks','updater','updates','upland','uplands','upon','upper','uppercase','uppers','upraise','upraises','uproar','uproarious','uproars','ups','upstage','upstages','upstairs','upstate','uptakes','upton','uptown','upturn','upturns','uranus','urethra','urethras','urey','urn','urns','us','usages','use','user','users','uses','usher','ushers','ustinov','usual','utah','ute','uterus','uteruses','utilities','uvula','uvulas','v','vacates','vacation','vacationer','vacationers','vacations','vaccinate','vaccinates','
vaccination','vaccinations','vaccine','vaccines','vacuous','vagaries','vagary','vagina','vaginal','vaginas','vagrancy','vaguer','val','vales','valhalla','valise','valises','valois','valparaiso','valuation','valuations','values','valves','valvoline','vamoose','vamooses','vamp','vampire','vampires','vamps','vargas','variation','variations','varicose','varies','various','varnish','varnishes','varsities','vary','vases','vassal','vassals','vassar','vastly','vat','vatican','vats','vauban','veracious','verb','verbal','verbals','verbena','verbenas','verbose','verbs','verge','verges','verier','verification','verifies','verify','verities','verlaine','vermouth','vern','verna','vernal','verne','vernon','verona','veronese','veronica','verse','verses','versification','versifies','versify','version','versions','versus','vertebra','vertebral','vertebras','vertebrate','vertebrates','vertexes','vertical','verticals','vertices','very','vesalius','vesicles','vesper','vespers','vespucci','vesta','vestige','vestiges','vesuvius','viagra','vial','vials','vibe','vibes','vibrancy','vibrantly','vibraphone','vibraphones','vibrate','vibrates','vibration','vibrations','vibrato','vibratos','vic','vicar','vicarage','vicarages','vicarious','vicars','vice','vicente','viceroy','viceroys','vices','vichy','vichyssoise','vicious','viciousness','vicki','vicky','vies','vila','vilification','vilifies','vilify','vince','vindicate','vindicates','vindication','vindications','vine','vines','vinson','vintage','vintages','viola','violas','violate','violates','violation','violations','violin','violins','viper','vipers','virago','viragoes','viragos','viral','vireo','vireos','virus','viruses','visages','visas','viscera','visceral','viscountess','viscountesses','viscous','viscus','vise','vises','vishnu','vision','visionaries','visionary','visions','visitation','visitations','vista','vistas','visual','visuals','vitiates','vitiation','vivacious','vivaciousness','vivas','vivifies','vivify','viviparous','vivisection','vixen','vixenish','vixens','vlasic','vocal','vocalic','vocals','vocation','vocational','vocations','vocatives','vociferate','vociferates','vociferation','vociferous','voice','voices','volition','volunteer','volunteers','voluptuaries','voluptuary','voluptuous','voluptuousness','voracious','vote','votes','vouch','voucher','vouchers','vouches','vow','vows','voyager','voyagers','voyages','vs','w','wac','wackier','wackiness','wacko','wackos','wacky','waco','wag','wager','wagers','wages','wagner','wagon','wagoner','wagoners','wagons','wags','wales','walk','walker','walkers','walks','wallabies','wallaby','wallace','wallis','walrus','walruses','walsh','war','wares','warier','wariness','warmth','warn','warner','warns','warp','warpath','warpaths','warps','warranties','warren','warrens','wars','warship','warships','wartier','warwick','wary','was','wasatch','wash','washbasin','washbasins','washcloth','washcloths','washer','washers','washes','washstand','washstands','wasp','waspish','wasps','wastage','waste','wasteland','wastelands','wastepaper','wastes','watch','watchband','watchbands','watcher','watchers','watches','water','watercress','watergate','waterier','waterline','waterlines','waterpower','waterproof','waterproofs','waters','watery','watkins','watson','wattage','watteau','wattles','watusi','wesson','westbound','western','westerner','westerners','westerns','whack','whackier','whacks','whacky','whaler','whalers','whales','wham','whams','wharf','wharfs','wharves','what','whats','wheal','wheals','wheat','wheaties','when','whence','wheneve
r','whens','where','whereas','whereat','whereby','wherefore','wherefores','wherein','whereof','whereon','wheres','whereupon','wherever','wherewithal','whether','whew','whey','which','whichever','whiff','whiffs','whine','whiner','whiners','whines','whinier','whinnies','whinny','whiny','whip','whiplash','whiplashes','whippersnapper','whippersnappers','whips','whir','whirs','whisk','whisker','whiskers','whiskies','whisks','whisky','whiskys','whisper','whispers','whistler','whistlers','whistles','whitaker','white','whitecap','whitecaps','whitefish','whitefishes','whiten','whitener','whiteners','whiteness','whitens','whites','whitewash','whitewashes','whither','whitish','who','whoop','whoops','whoosh','whooshes','whopper','whoppers','whore','whorehouse','whorehouses','whores','whose','why','whys','wicca','wichita','wick','wicker','wickers','wicks','wife','wifelier','wigeon','wigeons','wii','wiki','wikis','wilier','wiliness','win','wince','winces','winch','winches','wind','windbag','windbags','windburn','windier','windiness','windlass','windlasses','window','windowpane','windowpanes','windows','windpipes','winds','windsock','windsocks','windsurf','windsurfs','windup','windups','windy','wine','wineries','winery','wines','winfrey','wink','winks','winnebago','winner','winners','winnow','winnows','wino','winos','wins','wiper','wipers','wipes','wire','wires','wiretap','wiretaps','wirier','wiriness','wiry','wis','wisconsin','wisconsinite','wisconsinites','wise','wiseacre','wiseacres','wisecrack','wisecracks','wiser','wises','wish','wishbone','wishbones','wisher','wishers','wishes','wisp','wispier','wisps','wispy','witch','witchery','witches','with','withal','wither','withers','within','withstand','withstands','witless','wives','wobblier','woes','wok','woks','won','wonder','wonderbra','wonderland','wonderlands','wonders','woo','woodbine','woody','wooer','wooers','woof','woofer','woofers','woofs','woolier','woolies','woolite','woos','wooten','wore','worn','wotan','wound','wounder','wounds','wow','wows','wrack','wraith','wraiths','wrap','wraparound','wraparounds','wrapper','wrappers','wraps','wrapt','wrath','wreath','wreathe','wreathes','wreaths','wreck','wrecker','wreckers','wrecks','wren','wrench','wrenches','wrens','wrestler','wrestlers','wrestles','wretch','wretches','wu','wuss','wusses','wycliffe','wynn','wyo','xe','xenakis','xenon','xenophobic','xenophon','y','yacc','yack','yacks','yalow','yalta','yalu','yam','yamoussoukro','yams','yarn','yarns','yataro','yates','yb','yerkes','yes','yeses','yeshivas','yeshivoth','yip','yips','yo','yock','yocks','yoga','yokes','yoko','yon','yonder','yonkers','yore','yoruba','you','yous','youth','youths','ypres','ypsilanti','yucatan','yucca','yuccas','yuck','yuckier','yucks','yucky','yuk','yukon','yuks','yunnan','yup','yuppies','yuppy','yups','yves','yvonne','zn','zr']
| mit |
mr3bn/DAT210x | Module6/assignment3.py | 1 | 3383 | import matplotlib.pyplot as plt
import pandas as pd
def load(path_test, path_train):
# Load up the data.
# You probably could have written this..
with open(path_test, 'r') as f: testing = pd.read_csv(f)
with open(path_train, 'r') as f: training = pd.read_csv(f)
# The number of samples between training and testing can vary
# But the number of features better remain the same!
n_features = testing.shape[1]
X_test = testing.ix[:,:n_features-1]
X_train = training.ix[:,:n_features-1]
y_test = testing.ix[:,n_features-1:].values.ravel()
y_train = training.ix[:,n_features-1:].values.ravel()
#
# Special:
return X_train, X_test, y_train, y_test
def peekData(x):
# The 'targets' or labels are stored in y. The 'samples' or data is stored in X
print "Peeking your data..."
fig = plt.figure()
cnt = 0
for col in range(5):
for row in range(10):
plt.subplot(5, 10, cnt + 1)
plt.imshow(x.ix[cnt,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
plt.axis('off')
cnt += 1
fig.set_tight_layout(True)
plt.show()
def drawPredictions(model, X_train, X_test, y_train, y_test):
fig = plt.figure()
# Make some guesses
y_guess = model.predict(X_test)
#
# INFO: This is the second lab we're demonstrating how to
    # do multi-plots using matplotlib. In the next assignment(s),
# it'll be your responsibility to use this and assignment #1
# as tutorials to add in the plotting code yourself!
num_rows = 10
num_cols = 5
index = 0
for col in range(num_cols):
for row in range(num_rows):
plt.subplot(num_cols, num_rows, index + 1)
# 8x8 is the size of the image, 64 pixels
plt.imshow(X_test.ix[index,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
# Green = Guessed right
# Red = Fail!
fontcolor = 'g' if y_test[index] == y_guess[index] else 'r'
plt.title('Label: %i' % y_guess[index], fontsize=6, color=fontcolor)
plt.axis('off')
index += 1
fig.set_tight_layout(True)
plt.show()
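# Note: peekData() and drawPredictions() above assume 8x8 image samples (64
# pixel features, as in sklearn's digits data), so they are left uncalled
# below, where the Parkinson's data is used instead. A hypothetical call would
# look like:
#
#     peekData(X_train)
#     drawPredictions(svc, X_train, X_test, y_train, y_test)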
X = pd.read_table('Datasets/parkinsons.data', delimiter=',', index_col='name')
y = X['status']
del X['status']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 7)
import sklearn.preprocessing as pre
transformer = pre.StandardScaler()
transformer.fit(X_train)
X_train = transformer.transform(X_train)
X_test = transformer.transform(X_test)
#from sklearn.decomposition import PCA
#pca = PCA(n_components=14)
#pca.fit(X_train)
#X_train = pca.transform(X_train)
#X_test = pca.transform(X_test)
from sklearn import manifold
iso = manifold.Isomap(n_neighbors=5, n_components=6)
iso.fit(X_train)
X_train = iso.transform(X_train)
X_test = iso.transform(X_test)
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train, y_train)
print svc.score(X_test, y_test)
import numpy as np
c_range = np.arange(0.05, 2, 0.05)
gamma_range = np.arange(0.001, 0.1, 0.001)
best_c = 0
best_gamma = 0
best_score = 0
for c in c_range:
for g in gamma_range:
svc = SVC(C=c, gamma=g)
svc.fit(X_train, y_train)
if svc.score(X_test, y_test) > best_score:
best_c = c
best_gamma = g
best_score = svc.score(X_test, y_test)
print best_score
print 'C: ', best_c
print 'gamma: ', best_gamma
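# An equivalent parameter search could be written with sklearn's GridSearchCV;
# the sketch below is illustrative only (it is not part of the original
# assignment, and it scores by cross-validation on the training data rather
# than against X_test as the loop above does):
#
#     from sklearn.model_selection import GridSearchCV
#     grid = GridSearchCV(SVC(), {'C': c_range, 'gamma': gamma_range})
#     grid.fit(X_train, y_train)
#     print grid.best_params_, grid.best_score_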
| mit |
amandalund/openmc | docs/source/conf.py | 3 | 7739 | # -*- coding: utf-8 -*-
#
# OpenMC documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
from unittest.mock import MagicMock
MOCK_MODULES = [
'openmoc', 'openmc.data.reconstruct',
]
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinx_numfig',
'nbsphinx'
]
if not on_rtd:
extensions.append('sphinxcontrib.rsvgconverter')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OpenMC'
copyright = '2011-2020, Massachusetts Institute of Technology and OpenMC contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.12"
# The full version, including alpha/beta/rc tags.
release = "0.12.1-dev"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_css_file('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openmc.tex', 'OpenMC Documentation',
'OpenMC contributors', 'manual'),
]
latex_elements = {
'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
""",
'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('https://matplotlib.org/', None)
}
| mit |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/io/sas/sas7bdat.py | 7 | 26963 | """
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas.saslib import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
Attempt to convert dates to Pandas datetime values. Note all
SAS date formats are supported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self._path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
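        # (values are stored as seconds since that epoch, so e.g. a stored
        # value of 86400.0 corresponds to 1960-01-02)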
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.index.dataSubheaderIndex
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.index.rowSizeIndex:
processor = self._process_rowsize_subheader
elif subheader_index == const.index.columnSizeIndex:
processor = self._process_columnsize_subheader
elif subheader_index == const.index.columnTextIndex:
processor = self._process_columntext_subheader
elif subheader_index == const.index.columnNameIndex:
processor = self._process_columnname_subheader
elif subheader_index == const.index.columnAttributesIndex:
processor = self._process_columnattributes_subheader
elif subheader_index == const.index.formatAndLabelIndex:
processor = self._process_format_subheader
elif subheader_index == const.index.columnListIndex:
processor = self._process_columnlist_subheader
elif subheader_index == const.index.subheaderCountsIndex:
processor = self._process_subheader_counts
elif subheader_index == const.index.dataSubheaderIndex:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
            if compression_literal == b"":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
pt = [const.page_meta_type, const.page_data_type]
        pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates and (self.column_formats[j] == "MMDDYY"):
epoch = pd.datetime(1960, 1, 1)
rslt[name] = epoch + pd.to_timedelta(rslt[name], unit='d')
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/demo_ribbon_box.py | 6 | 4262 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.image import BboxImage
from matplotlib._png import read_png
import matplotlib.colors
from matplotlib.cbook import get_sample_data
class RibbonBox(object):
original_image = read_png(get_sample_data("Minduka_Present_Blue_Pack.png",
asfileobj=False))
cut_location = 70
b_and_h = original_image[:,:,2]
color = original_image[:,:,2] - original_image[:,:,0]
alpha = original_image[:,:,3]
nx = original_image.shape[1]
def __init__(self, color):
rgb = matplotlib.colors.colorConverter.to_rgb(color)
im = np.empty(self.original_image.shape,
self.original_image.dtype)
im[:,:,:3] = self.b_and_h[:,:,np.newaxis]
im[:,:,:3] -= self.color[:,:,np.newaxis]*(1.-np.array(rgb))
im[:,:,3] = self.alpha
self.im = im
def get_stretched_image(self, stretch_factor):
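        # Stretch the box vertically by filling the output with the row at
        # cut_location, then restoring the unstretched top and bottom of the
        # original image.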
stretch_factor = max(stretch_factor, 1)
ny, nx, nch = self.im.shape
ny2 = int(ny*stretch_factor)
stretched_image = np.empty((ny2, nx, nch),
self.im.dtype)
cut = self.im[self.cut_location,:,:]
stretched_image[:,:,:] = cut
stretched_image[:self.cut_location,:,:] = \
self.im[:self.cut_location,:,:]
stretched_image[-(ny-self.cut_location):,:,:] = \
self.im[-(ny-self.cut_location):,:,:]
self._cached_im = stretched_image
return stretched_image
class RibbonBoxImage(BboxImage):
zorder = 1
def __init__(self, bbox, color,
cmap = None,
norm = None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample = False,
**kwargs
):
BboxImage.__init__(self, bbox,
cmap = cmap,
norm = norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample = resample,
**kwargs
)
self._ribbonbox = RibbonBox(color)
self._cached_ny = None
def draw(self, renderer, *args, **kwargs):
bbox = self.get_window_extent(renderer)
stretch_factor = bbox.height / bbox.width
ny = int(stretch_factor*self._ribbonbox.nx)
if self._cached_ny != ny:
arr = self._ribbonbox.get_stretched_image(stretch_factor)
self.set_array(arr)
self._cached_ny = ny
BboxImage.draw(self, renderer, *args, **kwargs)
if 1:
from matplotlib.transforms import Bbox, TransformedBbox
from matplotlib.ticker import ScalarFormatter
fig, ax = plt.subplots()
years = np.arange(2004, 2009)
box_colors = [(0.8, 0.2, 0.2),
(0.2, 0.8, 0.2),
(0.2, 0.2, 0.8),
(0.7, 0.5, 0.8),
(0.3, 0.8, 0.7),
]
heights = np.random.random(years.shape) * 7000 + 3000
fmt = ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(fmt)
for year, h, bc in zip(years, heights, box_colors):
bbox0 = Bbox.from_extents(year-0.4, 0., year+0.4, h)
bbox = TransformedBbox(bbox0, ax.transData)
rb_patch = RibbonBoxImage(bbox, bc, interpolation="bicubic")
ax.add_artist(rb_patch)
ax.annotate(r"%d" % (int(h/100.)*100),
(year, h), va="bottom", ha="center")
patch_gradient = BboxImage(ax.bbox,
interpolation="bicubic",
zorder=0.1,
)
gradient = np.zeros((2, 2, 4), dtype=np.float)
gradient[:,:,:3] = [1, 1, 0.]
gradient[:,:,3] = [[0.1, 0.3],[0.3, 0.5]] # alpha channel
patch_gradient.set_array(gradient)
ax.add_artist(patch_gradient)
ax.set_xlim(years[0]-0.5, years[-1]+0.5)
ax.set_ylim(0, 10000)
fig.savefig('ribbon_box.png')
plt.show()
| mit |
aebrahim/cobrapy | setup.py | 1 | 7763 | from os.path import isfile, abspath, dirname, join
from sys import argv, path
# To temporarily modify sys.path
SETUP_DIR = abspath(dirname(__file__))
try:
from setuptools import setup, find_packages
except ImportError:
path.insert(0, SETUP_DIR)
import ez_setup
path.pop(0)
ez_setup.use_setuptools()
from setuptools import setup, find_packages
# for running parallel tests due to a bug in python 2.7.3
# http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except:
    pass
# import version to get the version string
path.insert(0, join(SETUP_DIR, "cobra"))
from version import get_version, update_release_version
path.pop(0)
version = get_version(pep440=True)
# If building something for distribution, ensure the VERSION
# file is up to date
if "sdist" in argv or "bdist_wheel" in argv:
update_release_version()
# cython is optional for building. The c file can be used directly. However,
# for certain functions, the c file must be generated, which requires cython.
try:
from Cython.Build import cythonize
from distutils.version import StrictVersion
import Cython
try:
cython_version = StrictVersion(Cython.__version__)
except ValueError:
raise ImportError("Cython version not parseable")
else:
if cython_version < StrictVersion("0.21"):
raise ImportError("Cython version too old to use")
except ImportError:
cythonize = None
for k in ["sdist", "develop"]:
if k in argv:
raise Exception("Cython >= 0.21 required for " + k)
# Begin constructing arguments for building
setup_kwargs = {}
# for building the cglpk solver
try:
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from os import name
    from platform import system
    from warnings import warn
class FailBuild(build_ext):
"""allow building of the C extension to fail"""
def run(self):
try:
build_ext.run(self)
except Exception as e:
warn(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except:
                pass
build_args = {}
setup_kwargs["cmdclass"] = {"build_ext": FailBuild}
# MAC OS X needs some additional configuration tweaks
# Build should be run with the python.org python
# Cython will output C which could generate warnings in clang
# due to the addition of additional unneeded functions. Because
# this is a known phenomenon, these warnings are silenced to
# make other potential warnings which do signal errors stand
# out.
if system() == "Darwin":
build_args["extra_compile_args"] = ["-Wno-unused-function"]
build_args["libraries"] = ["glpk"]
# It is possible to statically link libglpk to the built extension. This
# allows for simplified installation without the need to install libglpk to
# the system, and is also useful when installing a particular version of
# glpk which conflicts with the system version. A static libglpk.a can be
# built by running configure with CFLAGS="-fPIC" exported and copying the
# file from src/.libs to either the default lib directory or to the build
# directory. For an example script, see
# https://gist.github.com/aebrahim/94a2b231d86821f7f225
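# A hedged sketch of that build (the glpk source layout and destination path
# are assumptions):
#     export CFLAGS="-fPIC"
#     ./configure && make
#     cp src/.libs/libglpk.a src/glpk.h /path/to/cobrapy/  # next to setup.py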
include_dirs = []
library_dirs = []
if isfile("libglpk.a"):
library_dirs.append(abspath("."))
if isfile("glpk.h"):
include_dirs.append(abspath("."))
# if the glpk files are not in the current directory attempt to
# auto-detect their location by finding the location of the glpsol
# command
if name == "posix" and len(include_dirs) == 0 and len(library_dirs) == 0:
from subprocess import check_output
try:
glpksol_path = check_output(["which", "glpsol"],
universal_newlines=True).strip()
glpk_path = abspath(join(dirname(glpksol_path), ".."))
include_dirs.append(join(glpk_path, "include"))
library_dirs.append(join(glpk_path, "lib"))
except Exception as e:
print('Could not autodetect include and library dirs: ' + str(e))
if len(include_dirs) > 0:
build_args["include_dirs"] = include_dirs
if len(library_dirs) > 0:
build_args["library_dirs"] = library_dirs
# use cython if present, otherwise use c file
if cythonize:
ext_modules = cythonize([Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.pyx"],
**build_args)],
force=True)
else:
ext_modules = [Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.c"], **build_args)]
except Exception as e:
print('Could not build CGLPK: {}'.format(e))
ext_modules = None
extras = {
'matlab': ["pymatbridge"],
'sbml': ["python-libsbml", "lxml"],
'array': ["numpy>=1.6", "scipy>=0.11.0"],
'display': ["matplotlib", "palettable", "pandas>=0.17.0"]
}
all_extras = {'Cython>=0.21'}
for extra in extras.values():
all_extras.update(extra)
extras["all"] = sorted(list(all_extras))
# If using bdist_wininst, the installer will not get dependencies like
# a setuptools installation does. Therefore, for the one external dependency,
# which is six.py, we can just download it here and include it in the
# installer.
# The file six.py will need to be manually downloaded and placed in the
# same directory as setup.py.
if "bdist_wininst" in argv:
setup_kwargs["py_modules"] = ["six"]
try:
import pypandoc
readme = pypandoc.convert("README.md", "rst")
install = pypandoc.convert("INSTALL.md", "rst")
setup_kwargs["long_description"] = readme + "\n\n" + install
except:
with open("README.md", "r") as infile:
setup_kwargs["long_description"] = infile.read()
setup(
name="cobra",
version=version,
packages=find_packages(exclude=['cobra.oven', 'cobra.oven*']),
setup_requires=[],
install_requires=["six"],
tests_require=["jsonschema > 2.5"],
extras_require=extras,
ext_modules=ext_modules,
package_data={
'': ['test/data/*',
'VERSION',
'mlab/matlab_scripts/*m']},
author="Daniel Robert Hyduke <[email protected]>, "
"Ali Ebrahim <[email protected]>",
author_email="[email protected]",
description="COBRApy is a package for constraints-based modeling of "
"biological networks",
license="LGPL/GPL v2+",
keywords="metabolism biology linear programming optimization flux"
" balance analysis fba",
url="https://opencobra.github.io/cobrapy",
test_suite="cobra.test.suite",
download_url='https://pypi.python.org/pypi/cobra',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v2'
' or later (LGPLv2+)',
'License :: OSI Approved :: GNU General Public License v2'
' or later (GPLv2+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
platforms="GNU/Linux, Mac OS X >= 10.7, Microsoft Windows >= 7",
**setup_kwargs)
| lgpl-2.1 |
GuessWhoSamFoo/pandas | pandas/tests/io/test_date_converters.py | 3 | 1289 | from datetime import datetime
import numpy as np
import pandas.util.testing as tm
import pandas.io.date_converters as conv
def test_parse_date_time():
dates = np.array(['2007/1/3', '2008/2/4'], dtype=object)
times = np.array(['05:07:09', '06:08:00'], dtype=object)
expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
datetime(2008, 2, 4, 6, 8, 0)])
result = conv.parse_date_time(dates, times)
tm.assert_numpy_array_equal(result, expected)
def test_parse_date_fields():
days = np.array([3, 4])
months = np.array([1, 2])
years = np.array([2007, 2008])
result = conv.parse_date_fields(years, months, days)
expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
tm.assert_numpy_array_equal(result, expected)
def test_parse_all_fields():
hours = np.array([5, 6])
minutes = np.array([7, 8])
seconds = np.array([9, 0])
days = np.array([3, 4])
years = np.array([2007, 2008])
months = np.array([1, 2])
result = conv.parse_all_fields(years, months, days,
hours, minutes, seconds)
expected = np.array([datetime(2007, 1, 3, 5, 7, 9),
datetime(2008, 2, 4, 6, 8, 0)])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
kayak/fireant | fireant/tests/queries/test_dimension_choices.py | 2 | 13142 | from unittest import TestCase
from unittest.mock import (
ANY,
MagicMock,
Mock,
patch,
)
import pandas as pd
from fireant import DataSet, DataType, Field
from fireant.tests.dataset.matchers import (
FieldMatcher,
PypikaQueryMatcher,
)
from fireant.tests.dataset.mocks import (
mock_dataset,
mock_hint_dataset,
politicians_table,
test_database,
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class DimensionsChoicesQueryBuilderTests(TestCase):
maxDiff = None
def test_query_choices_for_field(self):
query = mock_dataset.fields.political_party.choices.sql[0]
self.assertEqual(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."politician" '
'GROUP BY "$political_party"',
str(query),
)
def test_query_choices_for_field_with_join(self):
query = mock_dataset.fields["district-name"].choices.sql[0]
self.assertEqual(
"SELECT "
'"district"."district_name" "$district-name" '
'FROM "politics"."politician" '
'FULL OUTER JOIN "locations"."district" '
'ON "politician"."district_id"="district"."id" '
'GROUP BY "$district-name"',
str(query),
)
def test_filter_choices(self):
query = (
mock_dataset.fields["candidate-name"]
.choices.filter(mock_dataset.fields.political_party.isin(["d", "r"]))
.sql[0]
)
self.assertEqual(
"SELECT "
'"candidate_name" "$candidate-name" '
'FROM "politics"."politician" '
"WHERE \"political_party\" IN ('d','r') "
'GROUP BY "$candidate-name"',
str(query),
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class DimensionsChoicesQueryBuilderWithHintTableTests(TestCase):
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
def test_query_choices_for_dataset_with_hint_table(self, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["candidate_name", "varchar(128)"],
["candidate_name_display", "varchar(128)"],
],
)
def test_query_choices_for_field_with_display_hint_table(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields.candidate_name.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"candidate_name" "$candidate_name",'
'"candidate_name_display" '
'"$candidate_name_display" '
'FROM "politics"."hints" '
'WHERE NOT "candidate_name" IS NULL '
'GROUP BY "$candidate_name",'
'"$candidate_name_display" '
'ORDER BY "$candidate_name"'
)
],
FieldMatcher(mock_hint_dataset.fields.candidate_name),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["political_party", "varchar(128)"],
["state_id", "varchar(128)"],
],
)
def test_query_choices_for_filters_from_joins(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields["district-name"].isin(["Manhattan"])
).filter(mock_hint_dataset.fields["state"].isin(["Texas"])).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"hints"."political_party" "$political_party" '
'FROM "politics"."hints" '
'JOIN "locations"."state" ON '
'"hints"."state_id"="state"."id" '
'WHERE "state"."state_name" IN (\'Texas\') '
'AND NOT "hints"."political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["political_party", "varchar(128)"],
["candidate_name", "varchar(128)"],
],
)
def test_query_choices_for_filters_from_base(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields.candidate_name.isin(["Bill Clinton"])
).filter(mock_hint_dataset.fields["election-year"].isin([1992])).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
"WHERE \"candidate_name\" IN ('Bill Clinton') "
'AND NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[["political_party", "varchar(128)"]],
)
def test_query_choices_for_case_filter(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields.political_party_case.isin(["Democrat", "Bill Clinton"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[["district_name", "varchar(128)"]],
)
def test_query_choices_for_join_dimension(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields["district-name"].choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"district_name" "$district-name" '
'FROM "politics"."hints" '
'WHERE NOT "district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["district_name", "varchar(128)"],
["candidate_name", "varchar(128)"],
],
)
def test_query_choices_for_join_dimension_with_filter_from_base(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields["district-name"].choices.filter(
mock_hint_dataset.fields.candidate_name.isin(["Bill Clinton"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"district_name" "$district-name" '
'FROM "politics"."hints" '
"WHERE \"candidate_name\" IN ('Bill Clinton') "
'AND NOT "district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["district_name", "varchar(128)"],
["district_id", "varchar(128)"],
],
)
def test_query_choices_for_join_dimension_with_filter_from_join(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields["district-name"].choices.filter(
mock_hint_dataset.fields["district-name"].isin(["Manhattan"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"hints"."district_name" "$district-name" '
'FROM "politics"."hints" '
'FULL OUTER JOIN "locations"."district" ON '
'"hints"."district_id"="district"."id" '
'WHERE "district"."district_name" IN ('
"'Manhattan') "
'AND NOT "hints"."district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
class DimensionsChoicesFetchTests(TestCase):
def test_query_choices_for_field(self, mock_fetch_data: Mock):
mock_dataset.fields.political_party.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."politician" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_dataset.fields.political_party),
)
def test_envelopes_responses_if_return_additional_metadata_True(self, mock_fetch_data):
mock_dataset = DataSet(
table=politicians_table,
database=test_database,
return_additional_metadata=True,
fields=[
Field(
"political_party",
label="Party",
definition=politicians_table.political_party,
data_type=DataType.text,
hyperlink_template="http://example.com/{political_party}",
)
],
)
df = pd.DataFrame({'political_party': ['a', 'b', 'c']}).set_index('political_party')
mock_fetch_data.return_value = 100, df
result = mock_dataset.fields.political_party.choices.fetch()
self.assertEqual(dict(max_rows_returned=100), result['metadata'])
self.assertTrue(
pd.Series(['a', 'b', 'c'], index=['a', 'b', 'c'], name='political_party').equals(result['data'])
)
| apache-2.0 |
sonnyhu/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
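(Concretely, the shrunk estimate used below has the form
``(1 - shrinkage) * S + shrinkage * mu * Identity`` with
``mu = trace(S) / n_features``, where ``S`` is the empirical covariance.)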
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
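    Examples
    --------
    For instance, ``strip_accents_unicode(u'\u00e9t\u00e9')`` (i.e. u'été')
    returns ``u'ete'``, assuming the accents decompose under NFKD.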
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
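        # illustration: with ngram_range=(3, 3), "ab cd" yields
        # [' ab', 'ab ', ' cd', 'cd ']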
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
    binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
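    Examples
    --------
    A minimal sketch on a toy corpus; the small ``n_features`` value is
    chosen only for illustration.
    >>> from sklearn.feature_extraction.text import HashingVectorizer
    >>> corpus = ['This is the first document.', 'This is the second one.']
    >>> X = HashingVectorizer(n_features=8).transform(corpus)
    >>> X.shape
    (2, 8)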
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items (strings or
bytes) that can be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
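Examples
--------
An illustrative sketch added for clarity (not upstream documentation);
the corpus is made up, the import path is the usual scikit-learn one,
and the output columns follow the alphabetically sorted vocabulary
(cat, mat, on, sat, the).
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = ['the cat sat', 'the cat sat on the mat']
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> len(vectorizer.vocabulary_)
5
>>> X.toarray().tolist()
[[1, 0, 0, 1, 1], [1, 1, 1, 1, 2]]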
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are nonzero in more documents than high or in
fewer documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to fewer documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
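Examples
--------
A small worked sketch added for illustration (not upstream
documentation). With ``smooth_idf=True`` the fit step computes
idf = ln((1 + n_samples) / (1 + df)) + 1, so for the counts below
(document frequencies 4, 1 and 2 over 4 documents) the idf values are
1.0, 1.916 and 1.511.
>>> import numpy as np
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> counts = np.array([[3, 0, 1],
...                    [2, 0, 1],
...                    [3, 1, 0],
...                    [4, 0, 0]])
>>> transformer = TfidfTransformer(smooth_idf=True, norm=None)
>>> _ = transformer.fit(counts)
>>> np.round(transformer.idf_, 3).tolist()
[1.0, 1.916, 1.511]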
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items (strings or
bytes) that can be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set ``use_idf`` to False and ``norm`` to None to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
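Examples
--------
An illustrative sketch added for clarity (not upstream documentation);
the documents are made up and the import path is the usual
scikit-learn one.
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = ['the cat sat on the mat', 'the dog sat on the log']
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> X.shape
(2, 7)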
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/types/missing.py | 7 | 11463 | """
missing types & inference
"""
import numpy as np
from pandas import lib
from pandas.tslib import NaT, iNaT
from .generic import (ABCMultiIndex, ABCSeries,
ABCIndexClass, ABCGeneric)
from .common import (is_string_dtype, is_datetimelike,
is_datetimelike_v_numeric, is_float_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype,
is_complex_dtype, is_categorical_dtype,
is_string_like_dtype, is_bool_dtype,
is_integer_dtype, is_dtype_equal,
needs_i8_conversion, _ensure_object,
pandas_dtype,
is_scalar,
is_object_dtype,
is_integer,
_TD_DTYPE,
_NS_DTYPE,
_DATELIKE_DTYPES)
from .inference import is_list_like
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
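Examples
--------
Short illustrative examples (added here, not part of the upstream
docstring):
>>> import numpy as np
>>> import pandas as pd
>>> pd.isnull(np.nan)
True
>>> pd.isnull(np.array([1.0, np.nan, 3.0])).tolist()
[False, True, False]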
"""
return _isnull(obj)
def _isnull_new(obj):
if is_scalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=_isnull_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isnull = _isnull_new
def _use_inf_as_null(key):
"""Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
key : str
The option key; its boolean value selects the behaviour:
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
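Examples
--------
Illustrative only (added here); it assumes the standard
``mode.use_inf_as_null`` option is registered, which triggers this
callback when the option is set:
>>> import numpy as np
>>> import pandas as pd
>>> pd.set_option('mode.use_inf_as_null', True)
>>> pd.isnull(np.inf)
True
>>> pd.set_option('mode.use_inf_as_null', False)
>>> pd.isnull(np.inf)
False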
"""
from pandas.core.config import get_option
flag = get_option(key)
if flag:
globals()['_isnull'] = _isnull_old
else:
globals()['_isnull'] = _isnull_new
def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
if is_categorical_dtype(values):
from pandas import Categorical
if not isinstance(values, Categorical):
values = values.values
result = values.isnull()
else:
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(obj):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notnull(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
arr : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is *not* null or if an array
is given which of the element is *not* null.
See also
--------
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if is_scalar(res):
return not res
return ~res
def is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. NaT
but guard against passing a non-scalar """
if other is NaT or other is None:
return True
elif is_scalar(other):
# a timedelta
if hasattr(other, 'dtype'):
return other.view('i8') == iNaT
elif is_integer(other) and other == iNaT:
return True
return isnull(other)
return False
def _is_na_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isnull(fill_value):
return not (is_bool_dtype(dtype) or
is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
# string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isnull considers NaN and None to be equivalent.
return lib.array_equivalent_object(
_ensure_object(left.ravel()), _ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if (not isinstance(right_value, float) or
not np.isnan(right_value)):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
# numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view('i8')
right = right.view('i8')
# NaNs cannot occur otherwise.
try:
return np.array_equal(left, right)
except AttributeError:
# see gh-13388
#
# NumPy v1.7.1 has a bug in its array_equal
# function that prevents it from correctly
# comparing two arrays with complex dtypes.
# This bug is corrected in v1.8.0, so remove
# this try-except block as soon as we stop
# supporting NumPy versions < 1.8.0
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.tolist()
right = right.tolist()
return left == right
def _infer_fill_value(val):
"""
Infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like. If we are a NaT, return the correct dtyped
element to provide proper block construction.
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(_ensure_object(val))
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _is_na_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
Returns
-------
The NA value appropriate for ``dtype``: NaT for datetime-like dtypes,
np.nan for float, 0 for integer, False for bool, np.nan otherwise.
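Examples
--------
Illustrative examples added for clarity; they assume this module is
importable as ``pandas.types.missing``.
>>> import numpy as np
>>> from pandas.types.missing import na_value_for_dtype
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT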
"""
dtype = pandas_dtype(dtype)
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
return 0
elif is_bool_dtype(dtype):
return False
return np.nan
| apache-2.0 |
sirvsuite-support/sirvsuite | tools/SIRV_Calculate_quality_metrics.py | 1 | 42925 | """
SIRV_Calculate_quality metrics.py
This script first normalizes and filters the counts in SIRVs and endogenous RNA using trumpet plots. Afterwards, accuracy and precision are calculated.
see python path/to/SIRV_Calculate_quality_metrics.py -h
updated: 25 Oct 2016 Patrick Schagerl
Last revision: IH181010
(c)2016 Lexogen GmbH
Examples:
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/incomplete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm s -quant cufflinks -o results/SIRV_quality_s_I_
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/complete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm s -quant cufflinks -o results/SIRV_quality_s_C_
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/overannotated/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm s -quant cufflinks -o results/SIRV_quality_s_O_
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/incomplete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm m -tmm 0.01 -quant cufflinks -o results/SIRV_quality_m_I_
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/complete/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm m -tmm 0.01 -quant cufflinks -o results/SIRV_quality_m_C_
python package_SIRVs/V4_final_testing_C_I_O/SIRV_Calculate_quality_metrics.py -count_list input_files/alignments/NGS1.73_star_out/overannotated/E*/isoforms.fpkm_tracking -e "Fast_1:20" -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -lb 0.015625 -ub 4 -tm m -tmm 0.01 -quant cufflinks -o results/SIRV_quality_m_O_
"""
import SIRV_links
import sys
### necessary that matplotlib can be found
sys.path.append(SIRV_links.matplotlib)
### necessary that scipy can be found
sys.path.append(SIRV_links.scipy)
### imports
import numpy
import matplotlib
import matplotlib.pyplot as plt
import math
import argparse
from matplotlib.pyplot import *
import matplotlib.patches as mpatches
from scipy import stats
import random
import SIRV_preprocessing_inputs
import csv
#import json
import textwrap
### function
def quality_metrics(experiment, sample, replicates, controls, SIRV_mix_files, count_SIRVs, count_eRNA, per_SIRV, thres, lower_boundary, upper_boundary, disable_output, disable_plot, threshold_method, threshold_value, output_quality_file, summary_exp):
if not disable_output:
print "obtain quality metrics"
print "obtain information of the SIRVs and experiment, samples, controls"
# get the number of samples and the array repl incr (replicate increment) it shows e.g. [0, 3, 5, 8] meaning that there are replicates 0, 1, 2 (sample 1); 3, 4 (sample 2) and 5, 6, 7 (sample 3)
samples = len(controls)
repl_incr = numpy.zeros(len(replicates) + 1, dtype=int)
repl_incr[0] = 0
for i in range(0, len(replicates)):
repl_incr[i + 1] = sum(replicates[0 : i + 1])
# get the experiment assigned to every sample
experiment_assigned = []
sample_assigned = []
for i in repl_incr[ : -1]:
experiment_assigned.append(experiment)
sample_assigned.append(sample[i])
# obtain the SIRVs version
SIRV_type=None
if "SIRV103" not in count_SIRVs:
SIRV_type = "I"
elif "SIRV104" in count_SIRVs:
SIRV_type = "O"
else:
SIRV_type = "C"
# obtain SIRV information (sum, abundance, gene, submix)
SIRV, sum_mix, submixes = SIRV_preprocessing_inputs.get_SIRV_information(SIRV_mix_files,SIRV_type)
# obtain the sum and calculate the mean sum of the replicates --> obtain scaling factors (normalization) for SIRVs
number_fields = sum(replicates)
sum_repl_count = numpy.sum(count_SIRVs.values(), axis=0)
mean_sum_repl_count = numpy.zeros(sum(replicates))
norm_factor = numpy.zeros(sum(replicates))
for i in range(0, samples):
mean_sum_repl_count[repl_incr[i] : repl_incr[i + 1]] = numpy.mean(sum_repl_count[repl_incr[i] : repl_incr[i + 1]])
norm_factor[repl_incr[i]:repl_incr[i + 1]] = float(1)/(numpy.mean(sum_repl_count[repl_incr[i] : repl_incr[i + 1]]))*sum_mix[controls[i]]
# normalize the values and minimum value thres
normalized_values = {}
for key in count_SIRVs:
normalized_values[key] = count_SIRVs[key]*norm_factor
normalized_values[key][normalized_values[key] < thres] = thres
mean_normalized_values = {}
for key in count_SIRVs:
mean_normalized_values[key] = []
for i in range(0, samples):
mean_normalized_values[key].append(numpy.mean(normalized_values[key][repl_incr[i] : repl_incr[i + 1]]))
#### Calculate the LFC (log2(measured/expected) and CV (std in values of samples (replicates)) ####
if not disable_output:
print "calculate LFC and CV in SIRVs"
LFC = {}
for key in mean_normalized_values:
if SIRV[key]['E0'] != 0:
LFC[key] = numpy.empty(samples)
for i in range(0, len(controls)):
LFC[key][i] = math.log(mean_normalized_values[key][i]/SIRV[key][controls[i]], 2)
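# Note added for clarity (not in the original script): LFC is log2(measured / expected),
# e.g. a transcript measured at twice its nominal concentration gives LFC = log2(2) = 1,
# while a perfectly quantified transcript gives LFC = 0.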
all_CV = []
all_CV_nan=False
for i in range(0, samples):
if repl_incr[i + 1] - repl_incr[i] > 1:
mean = numpy.mean(numpy.array(normalized_values.values(), float)[ : , repl_incr[i] : repl_incr[i + 1]], axis=1)
std = numpy.std(numpy.array(normalized_values.values(),float)[ : , repl_incr[i] : repl_incr[i + 1]], ddof=1, axis=1)
all_CV.append(std/mean)
else:
all_CV.append(numpy.full(len(SIRV), float('nan')))
all_CV_nan=True
all_CV = numpy.transpose(all_CV)
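# Note added for clarity (not in the original script): precision is summarised per sample
# as the coefficient of variation CV = sd / mean across replicates, e.g. replicate values
# (0.9, 1.0, 1.1) give mean 1.0, sd 0.1 (ddof=1) and CV = 0.1; samples with a single
# replicate are assigned NaN because no spread can be estimated.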
#### Acc, Precision in SIRVs ####
### get the samples which belong to one experiment
if not disable_output:
print "calculate accuracy and precision in SIRVs"
experiment_sample_columns = {}
for i in range(0, len(experiment_assigned)):
if experiment_assigned[i] not in experiment_sample_columns:
experiment_sample_columns[experiment_assigned[i]] = []
experiment_sample_columns[experiment_assigned[i]].append(i)
experiment_prec = []
xlabels = []
for key in sorted(experiment_sample_columns):
####TODO nanmean originally
if all_CV_nan==False:
experiment_prec.append(numpy.nanmean(all_CV[ : , experiment_sample_columns[key]]))
else:
experiment_prec.append(float('nan'))
xlabels.append(key)
### obtain values of eRNA and precision of eRNA
if not disable_output:
print "obtain eRNA values"
# for key in count_eRNA:
# if len(count_eRNA[key])!=6:
# print key, count_eRNA[key]
sum_repl_eRNA = numpy.sum(count_eRNA.values(), axis=0)
# get controls of every replicate
controls_all_repl = []
for i in range(0, samples):
for a in range(0, replicates[i]):
controls_all_repl.append(controls[i])
# obtain the scaling factor for eRNA
mean_sum_repl_eRNA = numpy.zeros(sum(replicates))
scaling_factor = numpy.zeros(sum(replicates))
for i in range(0, samples):
mean_sum_repl_eRNA[repl_incr[i] : repl_incr[i + 1]] = numpy.mean(sum_repl_eRNA[repl_incr[i] : repl_incr[i + 1]])
array_SIRV_value = numpy.full(len(per_SIRV), 69.5, float)
spike_in_time = array_SIRV_value*100/per_SIRV
scaling_factor = spike_in_time/mean_sum_repl_eRNA
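# Note added for clarity (not in the original script): per_SIRV is the expected percentage
# of the library that stems from the SIRV spike-in, and 69.5 is presumably the nominal total
# SIRV abundance, so each sample's endogenous RNA is rescaled to a common total of
# 69.5 * 100 / per_SIRV (e.g. per_SIRV = 3 gives a target total of about 2316.7).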
if not disable_output:
print "normalize with scaling factor"
# normalize the values
normalized_values_eRNA = count_eRNA.values()*scaling_factor
sum_repl_eRNA = numpy.sum(normalized_values_eRNA, axis=0)
sum_before = numpy.zeros(sum(replicates))
for i in range(0, samples):
sum_before[repl_incr[i] : repl_incr[i + 1]] = numpy.mean(sum_repl_eRNA[repl_incr[i] : repl_incr[i + 1]])
# see if one value is below the detection threshold
normalized_values_eRNA[normalized_values_eRNA < thres] = thres
if not disable_output:
print "create values for plot"
# take the log and make a trumpet plot --> with quadratic regression obtain threshold for filtering of replicates with high abundance if another replicate is below the detection threshold
log_norm_values_eRNA = numpy.log10(normalized_values_eRNA)
if not disable_output:
print "get max values"
values_rand_x = []
values_rand_y = []
# for testing
# raise NameError('test0')
### sample specific obtaining the threshold
if threshold_method != "m" or threshold_method == "m" and threshold_value is None:
mean_p0 = {}
mean_p1 = {}
mean_p2 = {}
i_fig = 0
for key in experiment_sample_columns:
mean_p0[key] = {}
mean_p1[key] = {}
mean_p2[key] = {}
### some where here
for index_sample in range(0, len(experiment_sample_columns[key])):
mean_p0[key][experiment_sample_columns[key][index_sample]] = []
mean_p1[key][experiment_sample_columns[key][index_sample]] = []
mean_p2[key][experiment_sample_columns[key][index_sample]] = []
for trumpet_replicate_1 in range(repl_incr[experiment_sample_columns[key][index_sample]], repl_incr[experiment_sample_columns[key][index_sample] + 1]):
##### here is the problem????
for trumpet_replicate_2 in range(trumpet_replicate_1 + 1, repl_incr[experiment_sample_columns[key][index_sample] + 1]):
values = {}
for i in range(0, len(normalized_values_eRNA)):
if round(math.log(normalized_values_eRNA[i,trumpet_replicate_1], 10), 1) not in values:
values[round(math.log(normalized_values_eRNA[i, trumpet_replicate_1], 10), 1)] = math.log(normalized_values_eRNA[i, trumpet_replicate_2], 10)
else:
if math.log(normalized_values_eRNA[i, trumpet_replicate_2], 10) > values[round(math.log(normalized_values_eRNA[i, trumpet_replicate_1], 10), 1)]:
values[round(math.log(normalized_values_eRNA[i, trumpet_replicate_1], 10), 1)] = math.log(normalized_values_eRNA[i, trumpet_replicate_2], 10)
# if not disable_output:
# print "threshold estimation"
### estimate the threshold of the filtering using different approaches
#raise NameError('in the loop')
#print values
temp_x = values.keys()
temp_y = values.values()
# approach 1: values log(10^-6)...0
# x = []
# y = []
# for i in range(0, len(temp_x)):
# if temp_x[i] > round(math.log(thres, 10), 1) and temp_x[i] < 0:
# x.append(temp_x[i])
# y.append(temp_y[i])
# # approach 2: values < 0
# x2 = []
# y2 = []
# for i in range(0, len(temp_x)):
# if temp_x[i] < 0:
# x2.append(temp_x[i])
# y2.append(temp_y[i])
# # approach 3: values above the 22,5 degree line (not y = x but y=x/2)
# x3 = []
# y3 = []
# for i in range(0, len(temp_x)):
# if 10**temp_y[i] > 10**temp_x[i]/2:
# x3.append(temp_x[i])
# y3.append(temp_y[i])
# approach 4 (1 and 3 combined): values above log(10^6) and values above the 22,5 degree line (not y = x but y = x/2)
x13 = []
y13 = []
for i in range(0, len(temp_x)):
if temp_x[i] > round(math.log(thres, 10), 1) and 10**temp_y[i] > 10**temp_x[i]/2:
x13.append(temp_x[i])
y13.append(temp_y[i])
# get the linear regression p
# p = numpy.poly1d(numpy.polyfit(x, y, 2))
# ptemp = numpy.poly1d(numpy.polyfit(temp_x, temp_y, 2))
# p2 = numpy.poly1d(numpy.polyfit(x2, y2, 2))
# p3 = numpy.poly1d(numpy.polyfit(x3, y3, 2))
p13 = numpy.poly1d(numpy.polyfit(x13, y13, 2))
mean_p0[key][experiment_sample_columns[key][index_sample]].append(p13[0])
mean_p1[key][experiment_sample_columns[key][index_sample]].append(p13[1])
mean_p2[key][experiment_sample_columns[key][index_sample]].append(p13[2])
for i in range(0, len(log_norm_values_eRNA[ : ,trumpet_replicate_1])):
if random.random() <= float(1)/repl_incr[-1]:
values_rand_x.append(log_norm_values_eRNA[i, trumpet_replicate_1])
values_rand_y.append(log_norm_values_eRNA[i, trumpet_replicate_2])
x_lin = numpy.linspace(min(temp_x), max(temp_x), 1000)
fig, ax = plt.subplots(1, figsize=(8, 8))
ax.scatter(log_norm_values_eRNA[ : , trumpet_replicate_1], log_norm_values_eRNA[ : , trumpet_replicate_2], alpha=0.25, color="#003c5a")
#ax.scatter(values_rand_x, values_rand_y, alpha=0.25, color="#003c5a")
# plot the regression
# ax.plot(x_lin, p(x_lin), color="r")
# ax.plot(x_lin, p2(x_lin), color="g")
# ax.plot(x_lin, ptemp(x_lin), color="b")
# ax.plot(x_lin, p3(x_lin), color="c")
ax.plot(x_lin, p13(x_lin), color="k")
ax.plot(p13(x_lin), x_lin, color="k")
ax.set_xlabel(experiment + " " + sample[trumpet_replicate_1] + " " + controls_all_repl[trumpet_replicate_1] + "\n" + "log(value)")
ax.set_ylabel(experiment + " " + sample[trumpet_replicate_2] + " " + controls_all_repl[trumpet_replicate_2] + "\n" + "log(value)")
ax.set_title('trumpet plot before filtering')
fig.subplots_adjust(left=0.1, bottom=0.08, right=0.97, top=0.96)
plt.xlim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA))+1)
plt.ylim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA))+1)
# plt.savefig("test_trumpet.svg", format="svg")
# plt.savefig("test_trumpet.ps", format="ps")
#plt.savefig("individual_trumpet/" + args.output_file + "_trumpet_before_filtering_" + str(key) + "_" + str(i_fig) + ".png", format="png")
i_fig += 1
plt.close()
else:
for key in experiment_sample_columns:
for index_sample in range(0, len(experiment_sample_columns[key])):
for trumpet_replicate_1 in range(repl_incr[experiment_sample_columns[key][index_sample]], repl_incr[experiment_sample_columns[key][index_sample] + 1]):
for trumpet_replicate_2 in range(trumpet_replicate_1 + 1, repl_incr[experiment_sample_columns[key][index_sample] + 1]):
for i in range(0, len(log_norm_values_eRNA[ : , trumpet_replicate_1])):
if random.random() <= float(1)/repl_incr[-1]:
values_rand_x.append(log_norm_values_eRNA[i, trumpet_replicate_1])
values_rand_y.append(log_norm_values_eRNA[i, trumpet_replicate_2])
#raise NameError('test0')
if threshold_method == "s":
###sample specific
p_temp = p13
estimated_filtering_thres_trumpet = numpy.zeros(repl_incr[-1])
for key1 in mean_p0:
for key2 in mean_p0[key1]:
p_temp[0] = mean_p0[key1][key2] = numpy.mean(mean_p0[key1][key2])
p_temp[1] = mean_p1[key1][key2] = numpy.mean(mean_p1[key1][key2])
p_temp[2] = mean_p2[key1][key2] = numpy.mean(mean_p2[key1][key2])
estimated_filtering_thres_trumpet[repl_incr[key2] : repl_incr[key2 + 1]] = 10**p_temp(-6)
if threshold_method == "m":
### manually
if threshold_value is not None:
estimated_filtering_thres_trumpet = numpy.full(repl_incr[-1], threshold_value)
else:
if not disable_output:
print "threshold was not set --> using experiment specific threshold -tm e"
threshold_method = "e"
if threshold_method == "e":
###experiment specific
p_temp = p13
estimated_filtering_thres_trumpet = numpy.zeros(repl_incr[-1])
for key1 in mean_p0:
overall_p0 = []
overall_p1 = []
overall_p2 = []
for key2 in mean_p0[key1]:
overall_p0.append(mean_p0[key1][key2])
overall_p1.append(mean_p1[key1][key2])
overall_p2.append(mean_p2[key1][key2])
p_temp[0] = mean_p0[key1][key2] = numpy.mean(overall_p0)
p_temp[1] = mean_p1[key1][key2] = numpy.mean(overall_p1)
p_temp[2] = mean_p2[key1][key2] = numpy.mean(overall_p2)
for key2 in mean_p0[key1]:
estimated_filtering_thres_trumpet[repl_incr[key2] : repl_incr[key2 + 1]] = 10**p_temp(-6)
#print numpy.log10(estimated_filtering_thres_trumpet)
if values_rand_x != []:
#x_lin = numpy.linspace(min(temp_x), max(temp_x), 1000)
x_lin = numpy.linspace(min(values_rand_x), max(values_rand_x), 1000)
# plt.scatter(x, y)
# plt.plot(x_lin, p(x_lin))
# plt.show()
#
# plt.scatter(x2, y2)
# plt.plot(x_lin, p2(x_lin))
# plt.show()
#
# plt.scatter(x3, y3)
# plt.plot(x_lin, p3(x_lin))
# plt.show()
#
# plt.scatter(x13, y13)
# plt.plot(x_lin, p13(x_lin))
# plt.xlim(-7, 4)
# plt.ylim(-7, 4)
# plt.show()
#
# plt.scatter(temp_x, temp_y)
# plt.plot(x_lin, ptemp(x_lin))
# plt.show()
# plt.plot(x, p(x))
if not disable_output and threshold_method == "e":
print "estimated experiment threshold for samples with one replicate having thres (10^-6) as abundance estimate"
print "estimated experiment log threshold", str(round(p13(-6), 2))
print "estimated experiment threshold", str(round(10**p13(-6), 6))
### plot the trumpet plot
fig, ax = plt.subplots(1, figsize=(8, 8))
#ax.scatter(log_norm_values_eRNA[ : , 0], log_norm_values_eRNA[ : , 1], alpha=0.25, color="#003c5a")
ax.scatter(values_rand_x, values_rand_y, alpha=0.25, color="#003c5a")
# plot the regression
# ax.plot(x_lin, p(x_lin), color="r")
# ax.plot(x_lin, p2(x_lin), color="g")
# ax.plot(x_lin, ptemp(x_lin), color="b")
# ax.plot(x_lin, p3(x_lin), color="c")
if threshold_method != "m" or threshold_method == "m" and threshold_value is None:
ax.plot(x_lin, p13(x_lin), color="k")
ax.plot(p13(x_lin), x_lin, color="k")
ax.set_xlabel("random values x" + "\n" + "log(value)")
ax.set_ylabel("random values y" + "\n" + "log(value)")
#ax.set_xlabel(experiment[0] + " " + sample[0] + " " + controls[0] + "\n" + "log(count)")
#ax.set_ylabel(experiment[1] + " " + sample[1] + " " + controls[1] + "\n" + "log(count)")
ax.set_title('trumpet plot before filtering')
fig.subplots_adjust(left=0.1, bottom=0.08, right=0.97, top=0.96)
#plt.xlim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA[ : , [trumpet_replicate_1, trumpet_replicate_2]])) + 1)
#plt.ylim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA[ : , [trumpet_replicate_1, trumpet_replicate_2]])) + 1)
plt.xlim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA)) + 1)
plt.ylim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA)) + 1)
# plt.savefig("test_trumpet.svg", format="svg")
#plt.savefig(args.output_file + "_trumpet_before_filtering.eps", format="eps")
# plt.savefig("test_trumpet.ps", format="ps")
plt.savefig(output_quality_file + "output_quality_trumpet_before_filtering.png", format="png")
if summary_exp is not None:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS'] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["OVERVIEW_TRUMPET"] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["OVERVIEW_TRUMPET"]["BEFORE_FILTERING"] = output_quality_file + "output_quality_trumpet_before_filtering.png"
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["OVERVIEW_TRUMPET"]["BEFORE_FILTERING_LEGEND"] = "Figure: Trumpet before filtering (random values from all replicates within one sample)"
if not disable_plot:
#plt.show()
plt.close()
else:
plt.close()
### filtering: for each sample, if any replicate of a transcript sits at the detection threshold (thres), replicates of that transcript at or above the trumpet-derived threshold are set back to thres
# normalized_values_eRNA = temp
array_filtered_transcripts = []
for line in range(0, len(normalized_values_eRNA)):
for i1 in range(0, samples):
inspect = False
for i2 in range(repl_incr[i1], repl_incr[i1 + 1]):
if normalized_values_eRNA[line][i2] == thres:
inspect = True
if inspect:
for i2 in range(repl_incr[i1], repl_incr[i1 + 1]):
if normalized_values_eRNA[line][i2] >= estimated_filtering_thres_trumpet[i2]:
### write the filtered transcripts in a csv file
s = []
s.append(count_eRNA.keys()[line])
s.append(sample[i1])
s.append(controls[i1])
s.append(normalized_values_eRNA[line][i2])
normalized_values_eRNA[line][i2] = thres
### write the filtered transcripts in a csv file
s.append(estimated_filtering_thres_trumpet[i2])
s.append(thres)
array_filtered_transcripts.append(s)
# IH181010 PROBLEM HERE the method is repoducibly crashing in the following for cycle
# IH181010 OLD
# filtered_transcripts=numpy.array(array_filtered_transcripts)
# filtered_transcripts_sorted=[]
# for i in range(0,len(filtered_transcripts)):
# filtered_transcripts_sorted.append(filtered_transcripts[numpy.argmax(filtered_transcripts[:,3])])
# filtered_transcripts=numpy.delete(filtered_transcripts,numpy.argmax(filtered_transcripts[:,3]),0)
# IH181010 NEW
filtered_transcripts_sorted = sorted(array_filtered_transcripts, key=lambda x: x[3])
# print filtered_transcripts_sorted
with open(output_quality_file + "filtered_transcripts.csv", 'w') as csvfile:
writer = csv.writer(csvfile)
s = []
s.append("transcript name")
s.append("sample name")
s.append("control")
s.append("value before filtering")
s.append("filtering threshold")
s.append("value after filtering")
writer.writerow(s)
for item in filtered_transcripts_sorted:
writer.writerow(item)
if summary_exp is not None:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["FILTERED_TRANSCRIPTS_CSV"] = output_quality_file + "filtered_transcripts.csv"
### normalize the values again to the sum because a lot of values were reduced to 10^-6
sum_repl_eRNA = numpy.sum(normalized_values_eRNA, axis=0)
if not disable_output:
print "normalize again"
sum_after = numpy.zeros(sum(replicates))
scaling_factor = numpy.zeros(sum(replicates))
for i in range(0, samples):
#print i
sum_after[repl_incr[i] : repl_incr[i + 1]] = numpy.mean(sum_repl_eRNA[repl_incr[i] : repl_incr[i + 1]])
scaling_factor = sum_before/sum_after
#print scaling_factor
for i in range(0, len(scaling_factor)):
normalized_values_eRNA[normalized_values_eRNA[ : , i] != thres, i] = normalized_values_eRNA[normalized_values_eRNA[ : , i] != thres, i]*scaling_factor[i]
log_norm_values_eRNA = numpy.log10(normalized_values_eRNA)
if values_rand_x != []:
values_rand_x = []
values_rand_y = []
### obtain random values again
for key in experiment_sample_columns:
for index_sample in range(0, len(experiment_sample_columns[key])):
for trumpet_replicate_1 in range(repl_incr[experiment_sample_columns[key][index_sample]], repl_incr[experiment_sample_columns[key][index_sample] + 1]):
for trumpet_replicate_2 in range(trumpet_replicate_1 + 1, repl_incr[experiment_sample_columns[key][index_sample] + 1]):
for i in range(0, len(log_norm_values_eRNA[ : , trumpet_replicate_1])):
if random.random() <= float(1)/repl_incr[-1]:
values_rand_x.append(log_norm_values_eRNA[i, trumpet_replicate_1])
values_rand_y.append(log_norm_values_eRNA[i, trumpet_replicate_2])
### plot the trumpet plot with adjusted values
fig, ax = plt.subplots(1, figsize=(8, 8))
ax.scatter(values_rand_x, values_rand_y, alpha=0.25, color="#003c5a")
#ax.scatter(log_norm_values_eRNA[ : , 0], log_norm_values_eRNA[ : , 1], alpha=0.25, color="#003c5a")
#ax.set_xlabel(experiment[0] + " " + sample[0] + " " + controls[0] + "\n" + "log(count)")
#ax.set_ylabel(experiment[1] + " " + sample[1] + " " + controls[1] + "\n" + "log(count)")
ax.set_xlabel("random values x" + "\n" + "log(value)")
ax.set_ylabel("random values y" + "\n" + "log(value)")
ax.set_title('trumpet plot after filtering')
fig.subplots_adjust(left=0.1, bottom=0.075, right=0.97, top=0.965)
plt.xlim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA)) + 1)
plt.ylim(math.floor(math.log(thres, 10)) - 1, math.ceil(numpy.max(log_norm_values_eRNA)) + 1)
# plt.savefig("test_trumpet.svg", format="svg")
#plt.savefig(args.output_file + "_trumpet_after_filtering.eps", format="eps")
# plt.savefig("test_trumpet.ps", format="ps")
plt.savefig(output_quality_file + "output_quality_trumpet_after_filtering.png", format="png")
if summary_exp is not None:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["OVERVIEW_TRUMPET"]["AFTER_FILTERING"] = output_quality_file + "output_quality_trumpet_after_filtering.png"
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["OVERVIEW_TRUMPET"]["AFTER_FILTERING_LEGEND"] = "Figure: Trumpet after filtering (random values from all replicates within one sample)"
if not disable_plot:
#plt.show()
plt.close()
else:
plt.close()
# obtain the CV values of eRNA (precision)
all_mean_eRNA = []
all_std_eRNA = []
number_replicates_one=False
for i in range(0, samples):
if repl_incr[i + 1] - repl_incr[i] > 1:
mean = numpy.mean(normalized_values_eRNA[ : , repl_incr[i] : repl_incr[i + 1]], axis=1)
std = numpy.std(normalized_values_eRNA[ : , repl_incr[i] : repl_incr[i + 1]], ddof=1, axis=1)
else:
number_replicates_one=True
mean = numpy.full(len(normalized_values_eRNA), float('nan'))
std = numpy.full(len(normalized_values_eRNA), float('nan'))
all_mean_eRNA.append(mean)
all_std_eRNA.append(std)
if number_replicates_one==False:
all_mean_eRNA = numpy.transpose(all_mean_eRNA)
all_std_eRNA = numpy.transpose(all_std_eRNA)
# filter the values with the lower and upper boundary specified
all_mean_eRNA[numpy.any([all_mean_eRNA <= lower_boundary, all_mean_eRNA >= upper_boundary], axis=0)]=float('nan')
all_CV_eRNA=all_std_eRNA/all_mean_eRNA
experiment_prec_eRNA = []
for key in sorted(experiment_sample_columns):
####TODO nanmean originally
experiment_prec_eRNA.append(numpy.nanmean(all_CV_eRNA[ : , experiment_sample_columns[key]]))
if summary_exp is not None:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["PRECISION"] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["PRECISION"]["eRNA"] = experiment_prec_eRNA[0]
### plot the precision
# ind = numpy.arange(len(experiment_prec))
# width = 0.35
# plt.bar( ind, experiment_prec, width, align='center', color='#96be0e')
# if number_replicates_one==False:
# plt.bar( ind + width, experiment_prec_eRNA, width, align='center', color='w')
# legend_pre_SIRV = mpatches.Patch(facecolor="#96be0e",edgecolor="k", label="pre SIRV")
# if number_replicates_one==False:
# legend_pre_eRNA = mpatches.Patch(facecolor='w', edgecolor="k", label="pre eRNA")
# plt.legend(handles = [legend_pre_SIRV, legend_pre_eRNA], loc='upper right' )
# plt.ylim(0, 1.25*max(max([experiment_prec, experiment_prec_eRNA])))
# else:
# plt.legend(handles = [legend_pre_SIRV], loc='upper right' )
# plt.ylim(0, 1.25*max(max([experiment_prec])))
# plt.xlabel('experiment')
# plt.ylabel('pre mean(CV)')
# plt.xticks(ind + width/2, xlabels)
#
# plt.title('precision')
# #plt.savefig(args.output_file + "_precision.eps", format="eps")
# #plt.savefig(args.output_file + "_precision.png", format="png")
# if not disable_plot:
# #plt.show()
# plt.close()
# else:
# plt.close()
### get the acc of one experiment
all_LFC = numpy.array(LFC.values(), float)
std_LFC = numpy.std(all_LFC, axis=0, ddof=1)
experiment_std_LFC = []
xlabels = []
for key in sorted(experiment_sample_columns):
#### TODO: originally std with ddof=1 and without abs
experiment_std_LFC.append((numpy.median(abs(all_LFC[ : , experiment_sample_columns[key]]))))
xlabels.append(key)
### get the diff acc of one experiment
experiment_diff_acc = []
for key_exp in sorted(experiment_sample_columns):
if len(sorted(experiment_sample_columns[key_exp])) > 1:
ratio_values_acc = {}
for key in sorted(mean_normalized_values):
ratio_values_acc[key] = {}
diff_acc_controls = {}
i=0
for i1 in range(0, len(experiment_sample_columns[key_exp])):
for i2 in range(i1 + 1, len(experiment_sample_columns[key_exp])):
ratio_values_acc[key][i] = mean_normalized_values[key][experiment_sample_columns[key_exp][i1]]/mean_normalized_values[key][experiment_sample_columns[key_exp][i2]]
diff_acc_controls[i] = [controls[experiment_sample_columns[key_exp][i1]], controls[experiment_sample_columns[key_exp][i2]]]
i += 1
diff_LFC = {}
for key in ratio_values_acc:
if SIRV[key]['E0'] != 0:
diff_LFC[key] = numpy.empty(len(ratio_values_acc[key]))
for i in range(0, len(diff_LFC[key])):
diff_LFC[key][i] = math.log(ratio_values_acc[key][i]/(SIRV[key][diff_acc_controls[i][0]]/SIRV[key][diff_acc_controls[i][1]]), 2)
all_diff_LFC = numpy.array(diff_LFC.values(), float)
#### TODO: originally std with ddof=1 and without abs
experiment_diff_acc.append((numpy.median(abs(all_diff_LFC))))
else:
experiment_diff_acc.append(float('nan'))
# ind = numpy.arange(len(experiment_std_LFC))
# width = 0.35
# plt.bar( ind, experiment_std_LFC, width, align='center', color="#96be0e")
# plt.bar( ind + width, experiment_diff_acc, width, align='center', color='#003c5a')
# legend_acc_SIRV = mpatches.Patch(facecolor="#96be0e", edgecolor="k", label="acc SIRV")
# legend_acc_eRNA = mpatches.Patch(facecolor="#003c5a", edgecolor="k", label="dif acc SIRV")
# plt.legend(handles = [legend_acc_SIRV, legend_acc_eRNA], loc='upper right' )
# plt.xlabel('experiment')
# plt.ylabel('acc mean(abs(LFC))')
# plt.xticks(ind + width/2, xlabels)
# plt.ylim(0, 1.25*max(max([experiment_std_LFC, experiment_diff_acc])))
# plt.title('accuracy')
# #plt.savefig(args.output_file + "_accuracy.eps", format="eps")
# #plt.savefig(args.output_file + "_accuracy.png", format="png")
# if not disable_plot:
# #plt.show()
# plt.close()
# else:
# plt.close()
if not disable_output:
print "experiment " + str(experiment)
for i in range(0, len(experiment_std_LFC)):
print "accuracy =", str(experiment_std_LFC[i])
for i in range(0, len(experiment_diff_acc)):
if not math.isnan(experiment_diff_acc[i]):
print "differential accuracy =", str(experiment_diff_acc[i])
for i in range(0, len(experiment_prec)):
print "precision SIRV experiment =", str(experiment_prec[i])
for i in range(0, len(experiment_prec_eRNA)):
print "precision endogenous RNA =", str(experiment_prec_eRNA[i])
# with open(output_quality_file, 'w') as output_file:
# output_file.write("experiment" + "\t" + str(experiment)+"\n")
# output_file.write("upper boundary" + "\t" + str(upper_boundary)+"\n")
# output_file.write("lower boundary" + "\t" + str(lower_boundary)+"\n")
#
# for i in range(0, len(experiment_std_LFC)):
# output_file.write( "accuracy" + "\t" + str(experiment_std_LFC[i])+"\n")
# for i in range(0, len(experiment_diff_acc)):
# if not math.isnan(experiment_diff_acc[i]):
# output_file.write( "diff acc" + "\t" + str(experiment_diff_acc[i])+"\n")
# for i in range(0, len(experiment_prec)):
# output_file.write("prec SIRV" + "\t" + str(experiment_prec[i])+"\n")
# for i in range(0, len(experiment_prec_eRNA)):
# output_file.write("prec eRNA" + "\t" + str(experiment_prec_eRNA[i])+"\n")
if summary_exp is not None:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['FILTERING'] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['FILTERING']["METHOD"]=threshold_method
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['FILTERING']["THRESHOLD"] = []
for item in estimated_filtering_thres_trumpet:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['FILTERING']["THRESHOLD"].append(item)
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["PRECISION"]["UPPER_BOUNDARY"] = upper_boundary
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["PRECISION"]["LOWER_BOUNDARY"] = lower_boundary
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["PRECISION"]["SIRV"] = experiment_prec[0]
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["ACCURACY"] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["ACCURACY"]["ACC_SIRV"] = experiment_std_LFC[0]
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']["ACCURACY"]["DIFF_ACC_SIRV"]=experiment_diff_acc[0]
### Generate the data necessary for the concordance evaluation in the comparator
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR'] = {}
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES']=[]
if int(summary_exp['EXPERIMENT_PACKAGE']['EXPERIMENT']['NUMBER_SAMPLES']) == 1:
sample_id = ""
sample_test = sample_assigned[0]
if sample_test == summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE']['@alias']:
sample_id = summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE']['@id']
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'].append({})
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][0]['@ref_sample'] = sample_id
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][0]['@ref_alias'] = sample_test
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][0]['control'] = summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE']['SAMPLE_ATTRIBUTES']['SIRV_MIX']
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][0]['values'] = {}
for key in LFC:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][0]['values'][key]=LFC[key][0]
elif int(summary_exp['EXPERIMENT_PACKAGE']['EXPERIMENT']['NUMBER_SAMPLES']) > 1:
index_sample = 0
for sample_test in sample_assigned:
index_search = 0
while summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE'][index_search]['@alias'] != sample_test:
index_search += 1
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'].append({})
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][index_sample]['@ref_sample'] = summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE'][index_search]['@id']
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][index_sample]['@ref_alias'] = sample_test
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][index_sample]['control'] = summary_exp['EXPERIMENT_PACKAGE']['SAMPLE_SET']['SAMPLE'][index_search]['SAMPLE_ATTRIBUTES']['SIRV_MIX']
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][index_sample]['values'] = {}
for key in LFC:
summary_exp['EXPERIMENT_PACKAGE']['HISTORY']['TOOL_RUN'][1]['QUALITY_METRICS']['LFC_COMPARATOR']['SAMPLES'][index_sample]['values'][key]=LFC[key][index_sample]
index_sample += 1
return summary_exp
### calling from the command line
if __name__ == "__main__":
# create the help and user friendly method for data input/selection
parser = argparse.ArgumentParser(prog='PROG', usage='%(prog)s [options]')
parser = argparse.ArgumentParser(prog='python SIRV_Calculate_quality_metrics.py', formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''This script first normalizes and filters the counts in SIRVs and endogenous RNA using trumpet plots. Using SIRV values and endogenous RNA values, the accuracy, differential accuracy and precision of the experiment are calculated. Output is printed to the command line. Filtered transcripts are written to a csv file. Overview trumpet plots built from random values of all individual trumpet plots are written as PNG output.
NOTES:
Normalization and filtering in SIRVs:
0. SIRV reads are normalized using the overall mean and expected sum of the SIRV mix
1. SIRV values below the relative quantity threshold are set to the relative quantity threshold
Normalization and filtering in endogenous RNA (boundary evaluation):
1. Endogenous RNA values below the relative quantity threshold are set to the relative quantity threshold
2. The filtering threshold is determined automatically (experiment- or sample-specific) or set manually
3. All transcripts for which at least one replicate equals the relative quantity threshold are evaluated. If a value is above the filtering threshold, it is set to the relative quantity threshold.
|@
|@* *
|! * **
|* ** *
|*** ***
--> |***********
1. rel. quant. |********
threshold |*******
|&****** * ! @ @ @
|_____________________
^ 1. rel. quant.
| threshold
*...log(abundance estimates e.g. FPKM values)
2. A filtering threshold ! is set/determined
3. Filtering: values @ are moved to &
Trumpet plots are generated for the replicates within each sample, always comparing 2 replicates of the same sample. Random values from all trumpet plots are included in the overview trumpet plot. If the filtering threshold is sample or experiment specific, the fitted polynomial is shown and the estimated thresholds are given. Moreover, trumpet plots show how dispersed the values are. For the precision in endogenous RNA, values are filtered using an upper and a lower boundary: only estimated endogenous RNA mean abundances within the boundaries are used for the precision evaluation.
How "good" an experiment is can be evaluated using features of the SIRVs. The metrics used here are accuracy, differential accuracy and precision.
Precision: First the mean and standard deviation (std) of the replicates within a sample are taken per transcript / SIRV transcript. The coefficient of variation (CV) of a transcript is std / mean. The overall mean of all CV values gives the precision, which measures how dispersed the replicate values are within a sample.
Accuracy and differential accuracy: First the log fold changes (LFC) from the measured to the expected values of the SIRVs are calculated. Accuracy is the median(abs(all SIRV LFC)), i.e. an estimate of how close the measured values are to the expected values. Differential accuracy evaluates the ratio of one SIRV mix to another and therefore gives an accuracy estimate for the ratio of one sample to another, which is helpful for differential expression analysis between samples.
'''), epilog="""
Example usage:
python path/to/SIRV_Calculate_quality_metrics.py -e "Experiment name" -count_list input_files/alignments/NGS_star_out/*/isoforms.fpkm_tracking -s "sample 1" "sample 1" "sample 2" "sample 2" "sample 3" "sample 3" -c E0 E0 E1 E1 E2 E2 -per_SIRV 3 3 3 3 3 3 -tm s -quant cufflinks -o SIRV_quality_metrics
End of help
""")
required = parser.add_argument_group('required arguments')
required.add_argument('-e', dest='experiment', help='name/id of experiment', type=str, required=True, metavar='name')
required.add_argument('-count_list', type=str, nargs='+', help='List of files with count values', required=True, metavar='count_file')
required.add_argument('-s', dest='sample', help='name/id of sample (assign a sample to every count file)', type=str, nargs='+', required=True, metavar='name')
required.add_argument('-c', dest='control', help='control SIRV mix spiked into each replicate (assign a control to every count file)', type=str, nargs='+', choices=['E0', 'E1', 'E2'], required=True)
required.add_argument('-per_SIRV', type=float, help='SIRV amount in percent (assign a percentage to every count file)', metavar='float', nargs='+', required=True)
parser.add_argument('-thres', dest='thres', type=float, help='relative quantity threshold which is the minimum count value, like the minimum detection value (default=1e-6)', metavar='float',default=1e-6)
parser.add_argument('-ub', type=float,default=float('inf'), help='upper boundary of count values for precision of endogenous RNA (default=inf)', metavar='float')
parser.add_argument('-lb', type=float, default=0, help='lower boundary of count values for precision of endogenous RNA (default=0)', metavar='float')
required.add_argument('-tm', dest='threshold_method', type=str, help='threshold method for filtering using trumpet plots of high abundance transcripts in endogenous RNA if one replicate value is thres (default: 1e-6), e...experiment specific, s...sample specific, m...manually (set -tmm float)', required=True, choices=['e', 's', 'm'])
parser.add_argument('-tmm', dest='threshold_value', type=float, help='manually set threshold e.g. 1e-2 (required for threshold method manually (-tm m), and if not given -tm e is used; no effect if threshold method is e or s)', metavar='float')
required.add_argument('-quant', dest='quant', help='quantification method used for abundance estimation', required=True, type=str, choices=['cufflinks','mix2','RSEM'])
# parser.add_argument('-JSON', dest='JSON_file', help='JSON file for appending the quality metrics information',type=str, metavar='json_file')
# parser.add_argument('-o_JSON', dest='output_JSON_file', help='JSON file output',type=str, metavar='json_file')
parser.add_argument('-do', dest='disable_output', action='store_true', help='disable console output')
parser.add_argument('-dp', dest='disable_plot', action='store_true', help='disable plot output')
required.add_argument('-o', dest='output_name', type=str, help='output file name without extension', metavar='output_name')
args = parser.parse_args()
if len(args.count_list) != len(args.control) or len(args.count_list) != len(args.sample) or len(args.count_list) != len(args.per_SIRV):
if not args.disable_output:
print "please assign an experiment and sample to every dataset"
else:
if not args.disable_output:
print "processing files"
### get experiment and samples and sort them, obtain an order for retrieval of the count files
order,samples_ordered, replicates, controls=SIRV_preprocessing_inputs.obtain_information(args.sample,args.control,args.disable_output)
### obtain count values already sorted and a dict_transcripts
count_SIRVs,_,count_eRNA,_ =SIRV_preprocessing_inputs.counts_converter(order, args.count_list,args.quant)
quality_metrics(args.experiment, samples_ordered, replicates, controls, SIRV_links.SIRV_mix_files, count_SIRVs, count_eRNA, args.per_SIRV, args.thres, args.lb, args.ub, args.disable_output, args.disable_plot, args.threshold_method, args.threshold_value, args.output_name, None)
| gpl-3.0 |
vincentdumont/nuri | nuri/check24hrs.py | 1 | 5494 | #!/usr/bin/env python
import sys,nuri,os,numpy
import matplotlib.pyplot as plt
import matplotlib.dates as md
from datetime import datetime,timedelta
def check24hrs(date):
"""
This operation will display the active periods for which data are
available from every sensor.
Parameters
----------
date : str
Year and month to display activity from. The format should be YYYY-MM.
"""
# Check if date is given
if date==None:
print 'date missing...'
quit()
# List all the hours in the requested month
y0 = int(date.split('-')[0])
m0 = int(date.split('-')[1])
d0 = datetime(y0,m0,1)
y1 = y0 if m0<12 else y0+1
m1 = m0+1 if m0<12 else 1
d1 = datetime(y1,m1,1)-timedelta(hours=1)
dt = timedelta(hours=1)
dates = numpy.arange(d0,d1,dt)
# Download metadata from Google Drive
sys.stderr.write('Retrieve information from Google Drive...')
os.system('skicka ls -r /MagneticFieldData/%s/%s/ > data'%(y0,m0))
data = numpy.loadtxt('data',dtype=str,delimiter='\n')
print >>sys.stderr,' done!'
# List file path for each date and each station
sys.stderr.write('Select active hours for each station...')
st0,st1,st2,st3,st4 = [],[],[],[],[]
for d in dates:
year = d.astype(object).year
month = d.astype(object).month
day = d.astype(object).day
hour = d.astype(object).hour
path = 'MagneticFieldData/%i/%i/%i/%i/'%(year,month,day,hour)
fname = '%i-%i-%i_%i-xx.zip'%(year,month,day,hour)
st0.append(path+'NURI-station/' +fname)
st1.append(path+'NURI-station-01/'+fname)
st2.append(path+'NURI-station-02/'+fname)
st3.append(path+'NURI-station-03/'+fname)
st4.append(path+'NURI-station-04/'+fname)
st0 = numpy.array([1 if path in data else 0 for path in st0])
st1 = numpy.array([1 if path in data else 0 for path in st1])
st2 = numpy.array([1 if path in data else 0 for path in st2])
st3 = numpy.array([1 if path in data else 0 for path in st3])
st4 = numpy.array([1 if path in data else 0 for path in st4])
print >>sys.stderr,' done!'
# Write down information in text file
print 'Save information in ASCII file...'
o = open('%i-%02i.dat'%(y0,m0),'w')
for d in dates:
year = d.astype(object).year
month = d.astype(object).month
day = d.astype(object).day
hour = d.astype(object).hour
path = 'MagneticFieldData/%i/%i/%i/%i/'%(year,month,day,hour)
fname = '%i-%i-%i_%i-xx.zip'%(year,month,day,hour)
o.write('%i-%02i-%02i_%02i'%(year,month,day,hour))
o.write(' NURI-station ') if path+'NURI-station/' +fname in data else o.write(' - ')
o.write(' NURI-station-01') if path+'NURI-station-01/'+fname in data else o.write(' - ')
o.write(' NURI-station-02') if path+'NURI-station-02/'+fname in data else o.write(' - ')
o.write(' NURI-station-03') if path+'NURI-station-03/'+fname in data else o.write(' - ')
o.write(' NURI-station-04') if path+'NURI-station-04/'+fname in data else o.write(' - ')
o.write('\n')
o.close()
dates = [d.astype(object) for d in dates]
plt.rc('font', size=2, family='serif')
plt.rc('axes', labelsize=10, linewidth=0.2)
plt.rc('legend', fontsize=2, handlelength=10)
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('lines', lw=0.2, mew=0.2)
plt.rc('grid', linewidth=0.2)
fig = plt.figure(figsize=(10,6))
plt.subplots_adjust(left=0.07, right=0.95, bottom=0.1, top=0.96, hspace=0.2, wspace=0)
print 'Plot active time for station 1...'
ax1 = fig.add_subplot(511)
ax1.bar(dates,st1,width=0.01,edgecolor='none',color='green')
ax1.tick_params(direction='in')
ax1.set_ylabel('Station 1')
ax1.xaxis_date()
plt.yticks([])
ax1.grid()
print 'Plot active time for station 2...'
ax = fig.add_subplot(512,sharex=ax1,sharey=ax1)
ax.bar(dates,st2,width=0.01,edgecolor='none',color='green')
ax.tick_params(direction='in')
ax.set_ylabel('Station 2')
ax.xaxis_date()
plt.yticks([])
ax.grid()
print 'Plot active time for station 3...'
ax = fig.add_subplot(513,sharex=ax1,sharey=ax1)
ax.bar(dates,st3,width=0.01,edgecolor='none',color='green')
ax.tick_params(direction='in')
ax.set_ylabel('Station 3')
ax.xaxis_date()
plt.yticks([])
ax.grid()
print 'Plot active time for station 4...'
ax = fig.add_subplot(514,sharex=ax1,sharey=ax1)
ax.bar(dates,st4,width=0.01,edgecolor='none',color='green')
ax.tick_params(direction='in')
ax.set_ylabel('Station 4')
ax.xaxis_date()
plt.yticks([])
ax.grid()
print 'Plot active time for station 0...'
ax = fig.add_subplot(515,sharex=ax1,sharey=ax1)
ax.bar(dates,st0,width=0.01,edgecolor='none',color='green')
ax.tick_params(direction='in')
ax.set_ylabel('Station 0')
ax.xaxis_date()
plt.yticks([])
ax.grid()
ax.set_xlabel(r'Hourly activity in %s %i (UTC)'%(d0.strftime("%B"),y0))
ax1.xaxis.set_major_formatter(md.DateFormatter('%d'))
ax1.xaxis.set_major_locator(md.DayLocator())
ax1.set_xlim(d0,d1)
ax1.set_ylim(0,1)
plt.savefig('%i-%02i.pdf'%(y0,m0),dpi=80)
if __name__=='__main__':
from nuri import get_args
args = get_args()
check24hrs(args.date)
| mit |
dnjohnstone/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 4 | 3559 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
----------
x1 : array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
have the same dimensions in the navigation axes.
x2 : array or float
The position of the end of the line segment in x.
see x1 arguments
y : array or float
The position of the line segment in y.
see x1 arguments
kwargs :
Keyword arguments of valid axvline properties (i.e. recognized by mpl.plot).
mpl.plot).
Example
-------
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
Adding a marker permanently to a signal
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.horizontal_line_segment(
>>> x1=10, x2=30, y=42, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m, permanent=True)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
self.name = 'horizontal_line_segment'
def __repr__(self):
string = "<marker.{}, {} (x1={},x2={},y={},color={})>".format(
self.__class__.__name__,
self.name,
self.get_data_position('x1'),
self.get_data_position('x2'),
self.get_data_position('y1'),
self.marker_properties['color'],
)
return(string)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def _plot_marker(self):
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
def _update_segment(self):
segments = self.marker.get_segments()
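# segments[0] is a (2, 2) array [[x_start, y], [x_end, y]]: set y on both endpoints
# first, then the x coordinates, falling back to the current axis limits when
# x1 or x2 is None.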
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
| gpl-3.0 |
erjerison/adaptability | github_submission/plot_figure5.py | 1 | 19409 | import numpy
import sys
import matplotlib.pylab as pt
import matplotlib.cm
import numpy.random
import matplotlib.ticker as ticker
from matplotlib.lines import Line2D
import scipy.stats
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['font.size'] = 10.0
matplotlib.rcParams['lines.markeredgewidth'] = 0
matplotlib.rcParams['lines.markersize'] = 3
matplotlib.rcParams['lines.linewidth'] = 1
matplotlib.rcParams['legend.fontsize'] = 8.0
matplotlib.rcParams['axes.linewidth']=.5
matplotlib.rcParams['patch.linewidth']=.5
def permute_within_categories(categories, cat_inds):
#categories: 1d array where each item has an index indicating which category it belongs to. The category indices need not be consecutive.
#cat_inds: list of category indices.
n = len(categories)
inds = numpy.arange(n) #Original order
permuted_order = numpy.zeros((n,),dtype='int')
for i in range(len(cat_inds)):
items_in_cat_unpermuted = inds[categories == cat_inds[i]]
permuted_order[items_in_cat_unpermuted] = numpy.random.permutation(items_in_cat_unpermuted)
return permuted_order
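# Illustrative usage (hypothetical input; output is random, one possible result shown):
# >>> permute_within_categories(numpy.array([7, 7, 7, 2, 2]), numpy.array([2, 7]))
# array([2, 0, 1, 4, 3]) # indices 0-2 shuffled among themselves, 3-4 among themselves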
def calculate_cat_inds(categories):
categories = numpy.array(categories)
return numpy.unique(categories)
def calculate_helper_matrix(categories, cat_inds):
#The helper matrix is a utility for quickly summing over specified rows in a table. It is intended to be matrix multiplied by the original mutation table; hence it is n_cats x n_pops
num_cats = len(cat_inds)
num_pops = len(categories)
helper_matrix = numpy.zeros((num_cats,num_pops))
for i in range(num_cats):
specific_cat_inds = numpy.where(categories == cat_inds[i])
helper_matrix[i, specific_cat_inds] = 1
return helper_matrix
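# Illustrative example (hypothetical): categories = [5, 5, 9], cat_inds = [5, 9] gives
# helper_matrix = [[1, 1, 0], [0, 0, 1]], so helper_matrix.dot(mutation_table) sums the
# per-gene mutation counts over the populations in each category.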
def calculate_entropy_statistic(mutation_table, helper_matrix):
muts_per_gene = numpy.sum(mutation_table, axis = 0)
collapsed_table = numpy.dot(helper_matrix,mutation_table)
pops_per_category = numpy.dot(helper_matrix,helper_matrix.T)
#print pops_per_category
probs = numpy.dot(numpy.linalg.inv(pops_per_category),collapsed_table)
num_genes = mutation_table.shape[1]
entropies = numpy.zeros((num_genes,))
total_pops = numpy.float(numpy.sum(pops_per_category))
for i in range(num_genes):
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
return numpy.sum(entropies)
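# In words: for each gene, p_c is the fraction of populations in category c with a hit in
# that gene, and the statistic sums (n_c/N) * H(p_c) over categories (H = binary entropy;
# categories with p_c equal to 0 or 1 contribute nothing) and then over genes. Smaller
# values indicate that hits segregate more cleanly by category.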
def calculate_entropy_statistic2(mutation_table, helper_matrix):
#This function can be used to weight double-hit mutations less than other mutations, since they carry less information.
#However, for this dataset including the 2-hit mutations with equal weight was equivalently sensitive.
muts_per_gene = numpy.sum(mutation_table, axis = 0)
collapsed_table = numpy.dot(helper_matrix,mutation_table)
pops_per_category = numpy.dot(helper_matrix,helper_matrix.T)
probs = numpy.dot(numpy.linalg.inv(pops_per_category),collapsed_table) #probability that a population in this category got a mutation
#print probs
num_genes = mutation_table.shape[1]
entropies = numpy.zeros((num_genes,))
weight = 1.
total_pops = numpy.float(numpy.sum(pops_per_category))
for i in range(num_genes):
if muts_per_gene[i] > 2.1:
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
else:
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = weight*numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
return numpy.sum(entropies)
#Read in the list of mutations that fixed in each population. Filter out snps that occur in multiple descendants of the same founder--these were SGV from the passaging of this segregant well.
input_file = 'data/mutation_lists_with_aa_positions_reannotated.txt'
#First loop to find any mutations that are shared among descendants of the same segregant
file = open(input_file,'r')
file_lines = file.readlines()
file.close()
segregant_mut_dict = {}
common_mut_dict = {}
for line in file_lines:
linelist = line.strip().split('\t')
if len(linelist) < 1.5:
#Go to the next clone
clone_name = linelist[0]
segregant = clone_name.split('_')[0]
if segregant not in segregant_mut_dict:
segregant_mut_dict[segregant] = []
else:
mutation = ('_').join(str(i) for i in linelist)
if len(linelist) > 5.5:
if linelist[6] == 'Non':
if mutation in segregant_mut_dict[segregant]:
print segregant, mutation
if segregant in common_mut_dict:
common_mut_dict[segregant].append(mutation)
else:
common_mut_dict[segregant] = [mutation]
if mutation not in segregant_mut_dict[segregant]:
segregant_mut_dict[segregant].append(mutation)
##Second loop to identify all de novo nonsynonymous mutations (and indels)
gene_dict_by_sample = {}
mutation_dict_by_sample = {}
for line in file_lines:
linelist = line.strip().split('\t')
if len(linelist) < 1.5:
#Go to the next clone
clone_name = linelist[0]
gene_dict_by_sample[clone_name] = []
mutation_dict_by_sample[clone_name] = []
local_gene_names = []
segregant = clone_name.split('_')[0]
else:
gene_name = linelist[4]
mutation = ('_').join(str(i) for i in linelist)
if len(linelist) > 5.5:
if linelist[6] == 'Non':
if segregant in common_mut_dict: #There might be shared ancestral snps
if ((gene_name not in local_gene_names) and (len(gene_name) < 6.5) and (mutation not in common_mut_dict[segregant])): #We have not already counted this mutation, it is not an ancestral mutation, and it is not in a dubious ORF
local_gene_names.append(gene_name)
gene_dict_by_sample[clone_name].append(gene_name)
mutation_dict_by_sample[clone_name].append(mutation)
elif ((gene_name not in local_gene_names) and (len(gene_name) < 6.5)): #We have not already counted this mutation, it is not an ancestral mutation, and it is not in a dubious ORF
local_gene_names.append(gene_name)
gene_dict_by_sample[clone_name].append(gene_name)
mutation_dict_by_sample[clone_name].append(mutation)
##Import fitness and founder genotype data
filename1 = 'data/fitness_measurements_with_population_names_12_29_2016.csv'
filename2 = 'data/control_replicate_measurements.csv'
filename3 = 'data/segregant_genotypes_deduplicated_with_header.csv'
segregant_vector = []
init_fits_ypd = []
init_std_errs_ypd = []
init_fits_sc = []
init_std_errs_sc = []
final_fits_ypd_pops_in_ypd = []
segregant_vector_ypd_pops = []
clone_vector_ypd_pops = []
final_fits_sc_pops_in_sc = []
segregant_vector_sc_pops = []
clone_vector_sc_pops = []
final_fits_sc_pops_in_ypd = []
final_fits_ypd_pops_in_sc = []
file1 = open(filename1,'r')
firstline = 0
for line in file1:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
segregant_vector.append(linestrs[0])
init_fits_ypd.append(float(linestrs[1]))
init_std_errs_ypd.append(float(linestrs[2]))
init_fits_sc.append(float(linestrs[3]))
init_std_errs_sc.append(float(linestrs[4]))
ypd_evolved_pops = linestrs[5].split(',')
for entry in ypd_evolved_pops:
templist = entry.split()
segregant_vector_ypd_pops.append(linestrs[0])
clone_vector_ypd_pops.append(templist[0])
final_fits_ypd_pops_in_ypd.append(float(templist[1]))
final_fits_ypd_pops_in_sc.append(float(templist[2]))
sc_evolved_pops = linestrs[6].split(',')
for entry in sc_evolved_pops:
templist = entry.split()
segregant_vector_sc_pops.append(linestrs[0])
clone_vector_sc_pops.append(templist[0])
final_fits_sc_pops_in_ypd.append(float(templist[1]))
final_fits_sc_pops_in_sc.append(float(templist[2]))
file1.close()
init_fits_ypd = numpy.array(init_fits_ypd)
init_std_errs_ypd = numpy.array(init_std_errs_ypd)
init_fits_sc = numpy.array(init_fits_sc)
init_std_errs_sc = numpy.array(init_std_errs_sc)
final_fits_ypd_pops_in_ypd = numpy.array(final_fits_ypd_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
segregant_vector_ypd_pops = numpy.array(segregant_vector_ypd_pops)
segregant_vector_sc_pops = numpy.array(segregant_vector_sc_pops)
final_fits_sc_pops_in_ypd = numpy.array(final_fits_sc_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
ypd_controls = {}
sc_controls = {}
file2 = open(filename2,'r')
firstline = 0
for line in file2:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
ypd_controls[linestrs[0]] = [float(i) for i in linestrs[1].split(',')]
sc_controls[linestrs[0]] = [float(i) for i in linestrs[2].split(',')]
file2.close()
genotype_mat = []
file3 = open(filename3,'r')
firstline = 0
for line in file3:
if firstline < .5:
firstline += 1
continue
linelist = line.strip().split(';')
genotype = [int(i) for i in linelist[1].split(',')]
genotype_mat.append(genotype)
genotype_mat = numpy.array(genotype_mat)
rm_allele = numpy.array(genotype_mat[:,3777],dtype='Bool')
by_allele = numpy.array(1 - genotype_mat[:,3777],dtype='Bool')
#Use controls (~8 technical replicates each of 24 final populations) to estimate the error variance on the final fitness measurements in each environment
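# This is the standard pooled within-population variance estimate: squared deviations from
# each control population's own mean are summed and divided by
# (total replicates - number of control populations).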
n_control_pops = 24.
var_sum = 0
n_total_reps = 0
for pop in ypd_controls:
fit = numpy.mean(ypd_controls[pop])
var_sum += numpy.sum((ypd_controls[pop] - fit)**2)
n_total_reps += len(ypd_controls[pop])
measurement_error_var_sc = var_sum/float(n_total_reps - n_control_pops)
var_sum = 0
n_total_reps = 0
for pop in sc_controls:
fit = numpy.mean(sc_controls[pop])
var_sum += numpy.sum((sc_controls[pop] - fit)**2)
n_total_reps += len(sc_controls[pop])
measurement_error_var_ypd = var_sum/float(n_total_reps - n_control_pops)
###
#Set up 'helper matrix' utilities to conveniently calculate averages and variances over segregant groups
num_segs = len(segregant_vector)
num_pops_ypd = len(segregant_vector_ypd_pops)
num_pops_sc = len(segregant_vector_sc_pops)
helper_matrix_ypd_pops = numpy.zeros((num_pops_ypd,num_segs))
helper_matrix_sc_pops = numpy.zeros((num_pops_sc,num_segs))
for i in range(num_segs):
current_seg = segregant_vector[i]
helper_matrix_ypd_pops[numpy.where(segregant_vector_ypd_pops == current_seg)[0],i] = 1.
helper_matrix_sc_pops[numpy.where(segregant_vector_sc_pops == current_seg)[0],i] = 1.
pops_per_seg_ypd = numpy.diag(numpy.dot(helper_matrix_ypd_pops.T,helper_matrix_ypd_pops))
pops_per_seg_sc = numpy.diag(numpy.dot(helper_matrix_sc_pops.T,helper_matrix_sc_pops))
rm_allele_pops_sc = numpy.array(numpy.dot(helper_matrix_sc_pops, rm_allele),dtype='Bool')
by_allele_pops_sc = numpy.array(numpy.dot(helper_matrix_sc_pops, by_allele),dtype='Bool')
rm_allele_pops_ypd = numpy.array(numpy.dot(helper_matrix_ypd_pops, rm_allele),dtype='Bool')
by_allele_pops_ypd = numpy.array(numpy.dot(helper_matrix_ypd_pops, by_allele),dtype='Bool')
# #Use the helper matrix to average among populations descended from a particular segregant:
delta_fits_ypd = final_fits_ypd_pops_in_ypd - numpy.dot(helper_matrix_ypd_pops,init_fits_ypd)
delta_fits_sc = final_fits_sc_pops_in_sc - numpy.dot(helper_matrix_sc_pops,init_fits_sc)
delta_fits_ypd_in_sc = final_fits_ypd_pops_in_sc - numpy.dot(helper_matrix_ypd_pops,init_fits_sc)
delta_fits_sc_in_ypd = final_fits_sc_pops_in_ypd - numpy.dot(helper_matrix_sc_pops,init_fits_ypd)
delta_fits_ypd_means = numpy.dot(delta_fits_ypd,helper_matrix_ypd_pops)/pops_per_seg_ypd
delta_fits_sc_means = numpy.dot(delta_fits_sc,helper_matrix_sc_pops)/pops_per_seg_sc
delta_fits_sc_in_ypd_means = numpy.dot(delta_fits_sc_in_ypd,helper_matrix_sc_pops)/pops_per_seg_sc
delta_fits_ypd_in_sc_means = numpy.dot(delta_fits_ypd_in_sc,helper_matrix_ypd_pops)/pops_per_seg_ypd
#Delta fits inherit variance from the initial fitness and final fitness measurements
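# i.e. per segregant, Var(delta fit) is estimated as the replicate-to-replicate variance of
# its evolved populations plus the squared standard error of its initial fitness, and the
# standard error of the mean delta fit is sqrt(Var / n_pops).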
delta_fits_sc_vars = numpy.dot((delta_fits_sc - numpy.dot(helper_matrix_sc_pops,delta_fits_sc_means))**2, helper_matrix_sc_pops)/(pops_per_seg_sc - 1.) + init_std_errs_sc**2
delta_fits_sc_std_errs = numpy.sqrt(delta_fits_sc_vars/pops_per_seg_sc)
delta_fits_ypd_vars = numpy.dot((delta_fits_ypd - numpy.dot(helper_matrix_ypd_pops,delta_fits_ypd_means))**2, helper_matrix_ypd_pops)/(pops_per_seg_ypd - 1.) + init_std_errs_ypd**2
delta_fits_ypd_std_errs = numpy.sqrt(delta_fits_ypd_vars/pops_per_seg_ypd)
delta_fits_ypd_in_sc_vars = numpy.dot((delta_fits_ypd_in_sc - numpy.dot(helper_matrix_ypd_pops,delta_fits_ypd_in_sc_means))**2, helper_matrix_ypd_pops)/(pops_per_seg_ypd - 1.) + init_std_errs_sc**2
delta_fits_ypd_in_sc_std_errs = numpy.sqrt(delta_fits_ypd_in_sc_vars/pops_per_seg_ypd)
delta_fits_sc_in_ypd_vars = numpy.dot((delta_fits_sc_in_ypd - numpy.dot(helper_matrix_sc_pops,delta_fits_sc_in_ypd_means))**2, helper_matrix_sc_pops)/(pops_per_seg_sc - 1.) + init_std_errs_ypd**2 #- measurement_error_ypd
delta_fits_sc_in_ypd_std_errs = numpy.sqrt(delta_fits_sc_in_ypd_vars/pops_per_seg_sc)
####First calculation: number of nonsynonymous, genic mutations vs. fitness in each evolution condition
clone_vector_sc_pops = numpy.array(clone_vector_sc_pops)
clone_vector_ypd_pops = numpy.array(clone_vector_ypd_pops)
clones_sc = [ i + '_' + j for [i,j] in numpy.stack((segregant_vector_sc_pops, clone_vector_sc_pops)).T]
clones_ypd = [i + '_' + j for [i,j] in numpy.stack((segregant_vector_ypd_pops, clone_vector_ypd_pops)).T]
num_muts_fit_array_sc = []
num_muts_fit_array_ypd = []
seg_list_sc_seq = []
seg_list_ypd_seq = []
num_muts_seg_dict_sc = {}
num_muts_seg_dict_ypd = {}
for clone in gene_dict_by_sample:
name_strs = clone.split('_')
seg = name_strs[0]
evol_env = name_strs[2]
clone_num = name_strs[1]
seg_index = segregant_vector.index(seg)
full_name = seg + '_' + clone_num
if seg not in num_muts_seg_dict_sc:
num_muts_seg_dict_sc[seg] = {}
num_muts_seg_dict_sc[seg]['init_fit'] = init_fits_sc[seg_index]
num_muts_seg_dict_sc[seg]['kre'] = rm_allele[seg_index]
num_muts_seg_dict_sc[seg]['num_muts'] = []
if seg not in num_muts_seg_dict_ypd:
num_muts_seg_dict_ypd[seg] = {}
num_muts_seg_dict_ypd[seg]['init_fit'] = init_fits_ypd[seg_index]
num_muts_seg_dict_ypd[seg]['kre'] = rm_allele[seg_index]
num_muts_seg_dict_ypd[seg]['num_muts'] = []
if (evol_env == 'sc' and full_name in clones_sc):
index_sc = clones_sc.index(full_name)
seg_list_sc_seq.append(seg)
elif (evol_env == 'ypd' and full_name in clones_ypd):
index_ypd = clones_ypd.index(full_name)
seg_list_ypd_seq.append(seg)
num_muts = len(gene_dict_by_sample[clone])
if (evol_env == 'sc' and full_name in clones_sc):
num_muts_seg_dict_sc[seg]['num_muts'].append(num_muts)
num_muts_fit_array_sc.append([init_fits_sc[seg_index], delta_fits_sc[index_sc], num_muts, init_std_errs_sc[seg_index], rm_allele[seg_index]])
elif (evol_env == 'ypd' and full_name in clones_ypd):
num_muts_seg_dict_ypd[seg]['num_muts'].append(num_muts)
num_muts_fit_array_ypd.append([init_fits_ypd[seg_index], delta_fits_ypd[index_ypd], num_muts, init_std_errs_ypd[seg_index], rm_allele[seg_index]])
##To plot just the sequenced segregants:
num_muts_fit_array_sc = numpy.array(num_muts_fit_array_sc)
num_muts_fit_array_ypd = numpy.array(num_muts_fit_array_ypd)
msc, bsc = numpy.polyfit(num_muts_fit_array_sc[:,0], num_muts_fit_array_sc[:,2], 1)
mypd, bypd = numpy.polyfit(num_muts_fit_array_ypd[:,0], num_muts_fit_array_ypd[:,2], 1)
r_sc, p_sc = scipy.stats.pearsonr(num_muts_fit_array_sc[:,0], num_muts_fit_array_sc[:,2])
r_ypd, p_ypd = scipy.stats.pearsonr(num_muts_fit_array_ypd[:,0], num_muts_fit_array_ypd[:,2])
print p_sc, p_ypd
###
fig, (ax1, ax2) = pt.subplots(1,2,figsize=(8,4))
colors = ['brown','Tomato']
for seg in num_muts_seg_dict_sc:
init_fit = num_muts_seg_dict_sc[seg]['init_fit']
num_muts = num_muts_seg_dict_sc[seg]['num_muts']
kre_status = num_muts_seg_dict_sc[seg]['kre']
color1 = colors[kre_status]
offset = 0
for num in sorted(num_muts):
ax2.plot(init_fit, num + offset, 'o', color=color1, alpha=.9, markeredgewidth=0)
offset += .08
ax2.plot([init_fit,init_fit], [min(num_muts), max(num_muts) + offset - .08], color=color1,alpha=.9,linewidth=.5)
ax2.set_ylim(-.5,13)
ax2.set_xlim(-.2,.18)
ax2.set_xlabel('Initial fitness, 37 C (%)')
ax2.set_ylabel('Number of nonsynon. muts')
#ax2.set_frame_on(False)
ax2.set_axisbelow(True)
ax2.get_xaxis().tick_bottom()
ax2.get_yaxis().tick_left()
ax2.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='on', # ticks along the bottom edge are on
top='off', # ticks along the top edge are off
labelbottom='on')
xmin, xmax = ax2.get_xaxis().get_view_interval()
ymin, ymax = ax2.get_yaxis().get_view_interval()
#ax2.add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2))
#ax2.add_artist(Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=2))
ax2.set_xticks(numpy.arange(-.2,.19,.05))
ax2.plot(numpy.arange(-.19,.18,.01), msc*numpy.arange(-.19,.18,.01) + bsc, 'k')
ax2.set_xticklabels(numpy.arange(-20,19,5))
ax2.text(.06,10,'$r^2=$' + str(round(r_sc**2,2)), fontsize=12)
ax2.set_title('Evolved at 37 C')
colors=['DarkSlateBlue','MediumSlateBlue']
for seg in num_muts_seg_dict_ypd:
init_fit = num_muts_seg_dict_ypd[seg]['init_fit']
num_muts = num_muts_seg_dict_ypd[seg]['num_muts']
kre_status = num_muts_seg_dict_ypd[seg]['kre']
color1 = colors[kre_status]
offset = 0
for num in sorted(num_muts):
ax1.plot(init_fit, num + offset, 'o', color=color1, alpha=.9, markeredgewidth=0)
offset += .08
ax1.plot([init_fit,init_fit], [min(num_muts), max(num_muts) + offset - .08], color=color1,alpha=.9,linewidth=.5)
ax1.set_ylim(-.25,6)
ax1.set_xlim(-.15,.1)
#ax1.set_frame_on(False)
ax1.set_axisbelow(True)
ax1.get_xaxis().tick_bottom()
ax1.get_yaxis().tick_left()
ax1.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='on', # ticks along the bottom edge are on
top='off', # ticks along the top edge are off
labelbottom='on')
xmin, xmax = ax1.get_xaxis().get_view_interval()
ymin, ymax = ax1.get_yaxis().get_view_interval()
#ax1.add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2))
#ax1.add_artist(Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=2))
ax1.set_xticks(numpy.arange(-.15,.11,.05))
ax1.plot(numpy.arange(-.13,.1,.01), mypd*numpy.arange(-.13,.1,.01) + bypd, 'k')
ax1.set_xticklabels(numpy.arange(-15,11,5))
ax1.set_xlabel('Initial fitness, 30 C (%)')
ax1.set_ylabel('Number of nonsynon. muts')
ax1.set_title('Evolved at 30 C')
ax1.text(.025,4.5,'$r^2=$' + str(round(r_ypd**2,2)), fontsize=12)
pt.savefig('Init_fit_v_num_mut_1_9_2016.pdf',bbox_inches='tight') | mit |
cpcloud/blaze | blaze/compute/tests/test_hdfstore.py | 14 | 1791 | import pytest
tables = pytest.importorskip('tables')
from blaze.compute.hdfstore import *
from blaze.utils import tmpfile
from blaze import symbol, discover, compute
import pandas as pd
from datetime import datetime
from odo import Chunks, resource, into
import os
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
def test_hdfstore():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, '/appendable', format='table')
df.to_hdf(fn, '/fixed')
hdf = resource('hdfstore://%s' % fn)
s = symbol('s', discover(hdf))
assert isinstance(compute(s.fixed, hdf),
(pd.DataFrame, pd.io.pytables.Fixed))
assert isinstance(compute(s.appendable, hdf),
(pd.io.pytables.AppendableFrameTable, Chunks))
s = symbol('s', discover(df))
f = resource('hdfstore://%s::/fixed' % fn)
a = resource('hdfstore://%s::/appendable' % fn)
assert isinstance(pre_compute(s, a), Chunks)
hdf.close()
f.parent.close()
a.parent.close()
def test_groups():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, '/data/fixed')
hdf = resource('hdfstore://%s' % fn)
assert discover(hdf) == discover({'data': {'fixed': df}})
s = symbol('s', discover(hdf))
assert list(compute(s.data.fixed, hdf).a) == [1, 2, 3, 4]
hdf.close()
| bsd-3-clause |
AllenDowney/ThinkBayes2 | scripts/sat.py | 3 | 12381 | """This file contains code used in "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import csv
import math
import numpy
import sys
import matplotlib
import matplotlib.pyplot as pyplot
import thinkbayes2
import thinkplot
def ReadScale(filename='sat_scale.csv', col=2):
"""Reads a CSV file of SAT scales (maps from raw score to standard score).
Args:
filename: string filename
col: which column to start with (0=Reading, 2=Math, 4=Writing)
Returns: thinkbayes2.Interpolator object
"""
def ParseRange(s):
"""Parse a range of values in the form 123-456
s: string
"""
t = [int(x) for x in s.split('-')]
return 1.0 * sum(t) / len(t)
fp = open(filename)
reader = csv.reader(fp)
raws = []
scores = []
for t in reader:
try:
raw = int(t[col])
raws.append(raw)
score = ParseRange(t[col+1])
scores.append(score)
except ValueError:
pass
raws.sort()
scores.sort()
return thinkbayes2.Interpolator(raws, scores)
def ReadRanks(filename='sat_ranks.csv'):
"""Reads a CSV file of SAT scores.
Args:
filename: string filename
Returns:
list of (score, freq) pairs
"""
fp = open(filename)
reader = csv.reader(fp)
res = []
for t in reader:
try:
score = int(t[0])
freq = int(t[1])
res.append((score, freq))
except ValueError:
pass
return res
def DivideValues(pmf, denom):
"""Divides the values in a Pmf by denom.
Returns a new Pmf.
"""
new = thinkbayes2.Pmf()
denom = float(denom)
for val, prob in pmf.Items():
x = val / denom
new.Set(x, prob)
return new
class Exam(object):
"""Encapsulates information about an exam.
Contains the distribution of scaled scores and an
Interpolator that maps between scaled and raw scores.
"""
def __init__(self):
self.scale = ReadScale()
scores = ReadRanks()
score_pmf = thinkbayes2.Pmf(dict(scores))
self.raw = self.ReverseScale(score_pmf)
self.max_score = max(self.raw.Values())
self.prior = DivideValues(self.raw, denom=self.max_score)
center = -0.05
width = 1.8
self.difficulties = MakeDifficulties(center, width, self.max_score)
def CompareScores(self, a_score, b_score, constructor):
"""Computes posteriors for two test scores and the likelihood ratio.
a_score, b_score: scales SAT scores
constructor: function that instantiates an Sat or Sat2 object
"""
a_sat = constructor(self, a_score)
b_sat = constructor(self, b_score)
a_sat.PlotPosteriors(b_sat)
if constructor is Sat:
PlotJointDist(a_sat, b_sat)
top = TopLevel('AB')
top.Update((a_sat, b_sat))
top.Print()
ratio = top.Prob('A') / top.Prob('B')
print('Likelihood ratio', ratio)
posterior = ratio / (ratio + 1)
print('Posterior', posterior)
if constructor is Sat2:
ComparePosteriorPredictive(a_sat, b_sat)
def MakeRawScoreDist(self, efficacies):
"""Makes the distribution of raw scores for given difficulty.
efficacies: Pmf of efficacy
"""
pmfs = thinkbayes2.Pmf()
for efficacy, prob in efficacies.Items():
scores = self.PmfCorrect(efficacy)
pmfs.Set(scores, prob)
mix = thinkbayes2.MakeMixture(pmfs)
return mix
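# The mixture above is a predictive distribution: each efficacy's raw-score Pmf is
# weighted by the probability of that efficacy.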
def CalibrateDifficulty(self):
"""Make a plot showing the model distribution of raw scores."""
thinkplot.Clf()
thinkplot.PrePlot(num=2)
cdf = thinkbayes2.Cdf(self.raw, label='data')
thinkplot.Cdf(cdf)
efficacies = thinkbayes2.MakeNormalPmf(0, 1.5, 3)
pmf = self.MakeRawScoreDist(efficacies)
cdf = thinkbayes2.Cdf(pmf, label='model')
thinkplot.Cdf(cdf)
thinkplot.Save(root='sat_calibrate',
xlabel='raw score',
ylabel='CDF',
formats=['pdf', 'eps'])
def PmfCorrect(self, efficacy):
"""Returns the PMF of number of correct responses.
efficacy: float
"""
pmf = PmfCorrect(efficacy, self.difficulties)
return pmf
def Lookup(self, raw):
"""Looks up a raw score and returns a scaled score."""
return self.scale.Lookup(raw)
def Reverse(self, score):
"""Looks up a scaled score and returns a raw score.
Since we ignore the penalty, negative scores round up to zero.
"""
raw = self.scale.Reverse(score)
return raw if raw > 0 else 0
def ReverseScale(self, pmf):
"""Applies the reverse scale to the values of a PMF.
Args:
pmf: Pmf object
scale: Interpolator object
Returns:
new Pmf
"""
new = thinkbayes2.Pmf()
for val, prob in pmf.Items():
raw = self.Reverse(val)
new.Incr(raw, prob)
return new
class Sat(thinkbayes2.Suite):
"""Represents the distribution of p_correct for a test-taker."""
def __init__(self, exam, score):
self.exam = exam
self.score = score
# start with the prior distribution
thinkbayes2.Suite.__init__(self, exam.prior)
# update based on an exam score
self.Update(score)
def Likelihood(self, data, hypo):
"""Computes the likelihood of a test score, given efficacy."""
p_correct = hypo
score = data
k = self.exam.Reverse(score)
n = self.exam.max_score
like = thinkbayes2.EvalBinomialPmf(k, n, p_correct)
return like
def PlotPosteriors(self, other):
"""Plots posterior distributions of efficacy.
self, other: Sat objects.
"""
thinkplot.Clf()
thinkplot.PrePlot(num=2)
cdf1 = thinkbayes2.Cdf(self, label='posterior %d' % self.score)
cdf2 = thinkbayes2.Cdf(other, label='posterior %d' % other.score)
thinkplot.Cdfs([cdf1, cdf2])
thinkplot.Save(xlabel='p_correct',
ylabel='CDF',
axis=[0.7, 1.0, 0.0, 1.0],
root='sat_posteriors_p_corr',
formats=['pdf', 'eps'])
class Sat2(thinkbayes2.Suite):
"""Represents the distribution of efficacy for a test-taker."""
def __init__(self, exam, score):
self.exam = exam
self.score = score
# start with the Normal prior
efficacies = thinkbayes2.MakeNormalPmf(0, 1.5, 3)
thinkbayes2.Suite.__init__(self, efficacies)
# update based on an exam score
self.Update(score)
def Likelihood(self, data, hypo):
"""Computes the likelihood of a test score, given efficacy."""
efficacy = hypo
score = data
raw = self.exam.Reverse(score)
pmf = self.exam.PmfCorrect(efficacy)
like = pmf.Prob(raw)
return like
def MakePredictiveDist(self):
"""Returns the distribution of raw scores expected on a re-test."""
raw_pmf = self.exam.MakeRawScoreDist(self)
return raw_pmf
def PlotPosteriors(self, other):
"""Plots posterior distributions of efficacy.
self, other: Sat objects.
"""
thinkplot.Clf()
thinkplot.PrePlot(num=2)
cdf1 = thinkbayes2.Cdf(self, label='posterior %d' % self.score)
cdf2 = thinkbayes2.Cdf(other, label='posterior %d' % other.score)
thinkplot.Cdfs([cdf1, cdf2])
thinkplot.Save(xlabel='efficacy',
ylabel='CDF',
axis=[0, 4.6, 0.0, 1.0],
root='sat_posteriors_eff',
formats=['pdf', 'eps'])
def PlotJointDist(pmf1, pmf2, thresh=0.8):
"""Plot the joint distribution of p_correct.
pmf1, pmf2: posterior distributions
thresh: lower bound of the range to be plotted
"""
def Clean(pmf):
"""Removes values below thresh."""
vals = [val for val in pmf.Values() if val < thresh]
[pmf.Remove(val) for val in vals]
Clean(pmf1)
Clean(pmf2)
pmf = thinkbayes2.MakeJoint(pmf1, pmf2)
thinkplot.Figure(figsize=(6, 6))
thinkplot.Contour(pmf, contour=False, pcolor=True)
thinkplot.Plot([thresh, 1.0], [thresh, 1.0],
color='gray', alpha=0.2, linewidth=4)
thinkplot.Save(root='sat_joint',
xlabel='p_correct Alice',
ylabel='p_correct Bob',
axis=[thresh, 1.0, thresh, 1.0],
formats=['pdf', 'eps'])
def ComparePosteriorPredictive(a_sat, b_sat):
"""Compares the predictive distributions of raw scores.
a_sat: posterior distribution
b_sat:
"""
a_pred = a_sat.MakePredictiveDist()
b_pred = b_sat.MakePredictiveDist()
#thinkplot.Clf()
#thinkplot.Pmfs([a_pred, b_pred])
#thinkplot.Show()
a_like = thinkbayes2.PmfProbGreater(a_pred, b_pred)
b_like = thinkbayes2.PmfProbLess(a_pred, b_pred)
c_like = thinkbayes2.PmfProbEqual(a_pred, b_pred)
print('Posterior predictive')
print('A', a_like)
print('B', b_like)
print('C', c_like)
def PlotPriorDist(pmf):
"""Plot the prior distribution of p_correct.
pmf: prior
"""
thinkplot.Clf()
thinkplot.PrePlot(num=1)
cdf1 = thinkbayes2.Cdf(pmf, label='prior')
thinkplot.Cdf(cdf1)
thinkplot.Save(root='sat_prior',
xlabel='p_correct',
ylabel='CDF',
formats=['pdf', 'eps'])
class TopLevel(thinkbayes2.Suite):
"""Evaluates the top-level hypotheses about Alice and Bob.
Uses the bottom-level posterior distribution about p_correct
(or efficacy).
"""
def Update(self, data):
a_sat, b_sat = data
a_like = thinkbayes2.PmfProbGreater(a_sat, b_sat)
b_like = thinkbayes2.PmfProbLess(a_sat, b_sat)
c_like = thinkbayes2.PmfProbEqual(a_sat, b_sat)
a_like += c_like / 2
b_like += c_like / 2
self.Mult('A', a_like)
self.Mult('B', b_like)
self.Normalize()
def ProbCorrect(efficacy, difficulty, a=1):
"""Returns the probability that a person gets a question right.
efficacy: personal ability to answer questions
    difficulty: how hard the question is
    a: discrimination (slope) parameter of the logistic curve; defaults to 1
    Returns: float prob
"""
return 1 / (1 + math.exp(-a * (efficacy - difficulty)))
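# Worked example: when efficacy equals difficulty the curve gives exactly 0.5,
# e.g. ProbCorrect(0, 0) == 0.5, while a strong test-taker on an easy question
# gets ProbCorrect(3, -1.85) = 1 / (1 + exp(-4.85)), roughly 0.99.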
def BinaryPmf(p):
"""Makes a Pmf with values 1 and 0.
p: probability given to 1
Returns: Pmf object
"""
pmf = thinkbayes2.Pmf()
pmf.Set(1, p)
pmf.Set(0, 1-p)
return pmf
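# Example: BinaryPmf(0.8) puts probability 0.8 on 1 and 0.2 on 0, i.e. a
# Bernoulli(0.8) distribution over "answered this question correctly".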
def PmfCorrect(efficacy, difficulties):
"""Computes the distribution of correct responses.
efficacy: personal ability to answer questions
difficulties: list of difficulties, one for each question
Returns: new Pmf object
"""
pmf0 = thinkbayes2.Pmf([0])
ps = [ProbCorrect(efficacy, difficulty) for difficulty in difficulties]
pmfs = [BinaryPmf(p) for p in ps]
dist = sum(pmfs, pmf0)
return dist
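# Example (approximate values): with difficulties [-1, 0, 1] and efficacy 0,
# the per-question probabilities are about [0.73, 0.50, 0.27], and summing the
# three binary Pmfs gives the distribution of the number correct (a Poisson
# binomial) with mean about 1.5.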
def MakeDifficulties(center, width, n):
"""Makes a list of n difficulties with a given center and width.
Returns: list of n floats between center-width and center+width
"""
low, high = center-width, center+width
return numpy.linspace(low, high, n)
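# Example: MakeDifficulties(-0.05, 1.8, 3) returns [-1.85, -0.05, 1.75], the
# same difficulty levels hard-coded in ProbCorrectTable below.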
def ProbCorrectTable():
"""Makes a table of p_correct for a range of efficacy and difficulty."""
efficacies = [3, 1.5, 0, -1.5, -3]
difficulties = [-1.85, -0.05, 1.75]
for eff in efficacies:
print('%0.2f & ' % eff, end=' ')
for diff in difficulties:
p = ProbCorrect(eff, diff)
print('%0.2f & ' % p, end=' ')
print(r'\\')
def main(script):
ProbCorrectTable()
exam = Exam()
PlotPriorDist(exam.prior)
exam.CalibrateDifficulty()
exam.CompareScores(780, 740, constructor=Sat)
exam.CompareScores(780, 740, constructor=Sat2)
if __name__ == '__main__':
main(*sys.argv)
| mit |
apache/spark | python/pyspark/pandas/tests/test_series.py | 9 | 118972 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from collections import defaultdict
from distutils.version import LooseVersion
import inspect
from itertools import product
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pyspark.ml.linalg import SparseVector
from pyspark import pandas as ps
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class SeriesTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_series_ops(self):
pser = self.pser
psser = self.psser
self.assert_eq(psser + 1 + 10 * psser, pser + 1 + 10 * pser)
self.assert_eq(psser + 1 + 10 * psser.index, pser + 1 + 10 * pser.index)
self.assert_eq(psser.index + 1 + 10 * psser, pser.index + 1 + 10 * pser)
def test_series_tuple_name(self):
pser = self.pser
pser.name = ("x", "a")
psser = ps.from_pandas(pser)
self.assert_eq(psser, pser)
self.assert_eq(psser.name, pser.name)
pser.name = ("y", "z")
psser.name = ("y", "z")
self.assert_eq(psser, pser)
self.assert_eq(psser.name, pser.name)
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
s = ps.range(10)["id"]
s.__repr__()
s.rename("a", inplace=True)
self.assertEqual(s.__repr__(), s.rename("a").__repr__())
def _check_extension(self, psser, pser):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psser, pser, check_exact=False)
self.assertTrue(isinstance(psser.dtype, extension_dtypes))
else:
self.assert_eq(psser, pser)
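    # _check_extension above compares extension-dtype results with
    # check_exact=False on pandas >= 1.1 and < 1.2.2, asserting only that the
    # dtype is an extension dtype; on other pandas versions the comparison is
    # exact. It is used by the extension-dtype tests further down.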
def test_empty_series(self):
pser_a = pd.Series([], dtype="i1")
pser_b = pd.Series([], dtype="str")
self.assert_eq(ps.from_pandas(pser_a), pser_a)
psser_b = ps.from_pandas(pser_b)
self.assert_eq(psser_b, pser_b)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ps.from_pandas(pser_a), pser_a)
self.assert_eq(ps.from_pandas(pser_b), pser_b)
def test_all_null_series(self):
pser_a = pd.Series([None, None, None], dtype="float64")
pser_b = pd.Series([None, None, None], dtype="str")
self.assert_eq(ps.from_pandas(pser_a), pser_a)
psser_b = ps.from_pandas(pser_b)
self.assert_eq(psser_b, pser_b)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ps.from_pandas(pser_a), pser_a)
self.assert_eq(ps.from_pandas(pser_b), pser_b)
def test_head(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser.head(3), pser.head(3))
self.assert_eq(psser.head(0), pser.head(0))
self.assert_eq(psser.head(-3), pser.head(-3))
self.assert_eq(psser.head(-10), pser.head(-10))
def test_last(self):
with self.assertRaises(TypeError):
self.psser.last("1D")
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pser = pd.Series([1, 2, 3, 4], index=index)
psser = ps.from_pandas(pser)
self.assert_eq(psser.last("1D"), pser.last("1D"))
def test_first(self):
with self.assertRaises(TypeError):
self.psser.first("1D")
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pser = pd.Series([1, 2, 3, 4], index=index)
psser = ps.from_pandas(pser)
self.assert_eq(psser.first("1D"), pser.first("1D"))
def test_rename(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
pser.name = "renamed"
psser.name = "renamed"
self.assertEqual(psser.name, "renamed")
self.assert_eq(psser, pser)
pser.name = None
psser.name = None
self.assertEqual(psser.name, None)
self.assert_eq(psser, pser)
pidx = pser.index
psidx = psser.index
pidx.name = "renamed"
psidx.name = "renamed"
self.assertEqual(psidx.name, "renamed")
self.assert_eq(psidx, pidx)
expected_error_message = "Series.name must be a hashable type"
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.name = ["renamed"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.name = ["0", "1"]
with self.assertRaisesRegex(TypeError, expected_error_message):
ps.Series([1, 2, 3], name=["0", "1"])
def test_rename_method(self):
# Series name
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.rename("y"), pser.rename("y"))
self.assertEqual(psser.name, "x") # no mutation
self.assert_eq(psser.rename(), pser.rename())
self.assert_eq((psser.rename("y") + 1).head(), (pser.rename("y") + 1).head())
psser.rename("z", inplace=True)
pser.rename("z", inplace=True)
self.assertEqual(psser.name, "z")
self.assert_eq(psser, pser)
expected_error_message = "Series.name must be a hashable type"
with self.assertRaisesRegex(TypeError, expected_error_message):
psser.rename(["0", "1"])
# Series index
# pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
# psser = ps.from_pandas(s)
# TODO: index
# res = psser.rename(lambda x: x ** 2)
# self.assert_eq(res, pser.rename(lambda x: x ** 2))
# res = psser.rename(pser)
# self.assert_eq(res, pser.rename(pser))
# res = psser.rename(psser)
# self.assert_eq(res, pser.rename(pser))
# res = psser.rename(lambda x: x**2, inplace=True)
# self.assertis(res, psser)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(psser, pser)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.rename_axis("index2").sort_index(),
psser.rename_axis("index2").sort_index(),
)
self.assert_eq(
(pser + 1).rename_axis("index2").sort_index(),
(psser + 1).rename_axis("index2").sort_index(),
)
pser2 = pser.copy()
psser2 = psser.copy()
pser2.rename_axis("index2", inplace=True)
psser2.rename_axis("index2", inplace=True)
self.assert_eq(pser2.sort_index(), psser2.sort_index())
self.assertRaises(ValueError, lambda: psser.rename_axis(["index2", "index3"]))
self.assertRaises(TypeError, lambda: psser.rename_axis(mapper=["index2"], index=["index3"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
psser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = psser
expected.index.name = "index2"
result = psser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index()
self.assert_eq(expected, result)
expected = psser
expected.index.name = "INDEX"
result = psser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.rename_axis(["index3", "index4"]).sort_index(),
psser.rename_axis(["index3", "index4"]).sort_index(),
)
self.assertRaises(ValueError, lambda: psser.rename_axis(["index3", "index4", "index5"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
psser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
psser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = psser
expected.index.names = ["index3", "index4"]
result = psser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["INDEX1", "INDEX2"]
result = psser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
def test_or(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
self.assert_eq(psdf["left"] | True, pdf["left"] | True)
self.assert_eq(psdf["left"] | False, pdf["left"] | False)
self.assert_eq(psdf["left"] | None, pdf["left"] | None)
self.assert_eq(True | psdf["right"], True | pdf["right"])
self.assert_eq(False | psdf["right"], False | pdf["right"])
self.assert_eq(None | psdf["right"], None | pdf["right"])
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
    def test_or_extension_dtypes(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
).astype("boolean")
psdf = ps.from_pandas(pdf)
self._check_extension(psdf["left"] | psdf["right"], pdf["left"] | pdf["right"])
self._check_extension(psdf["left"] | True, pdf["left"] | True)
self._check_extension(psdf["left"] | False, pdf["left"] | False)
self._check_extension(psdf["left"] | pd.NA, pdf["left"] | pd.NA)
self._check_extension(True | psdf["right"], True | pdf["right"])
self._check_extension(False | psdf["right"], False | pdf["right"])
self._check_extension(pd.NA | psdf["right"], pd.NA | pdf["right"])
def test_and(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
self.assert_eq(psdf["left"] & True, pdf["left"] & True)
self.assert_eq(psdf["left"] & False, pdf["left"] & False)
self.assert_eq(psdf["left"] & None, pdf["left"] & None)
self.assert_eq(True & psdf["right"], True & pdf["right"])
self.assert_eq(False & psdf["right"], False & pdf["right"])
self.assert_eq(None & psdf["right"], None & pdf["right"])
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
    def test_and_extension_dtypes(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
).astype("boolean")
psdf = ps.from_pandas(pdf)
self._check_extension(psdf["left"] & psdf["right"], pdf["left"] & pdf["right"])
self._check_extension(psdf["left"] & True, pdf["left"] & True)
self._check_extension(psdf["left"] & False, pdf["left"] & False)
self._check_extension(psdf["left"] & pd.NA, pdf["left"] & pd.NA)
self._check_extension(True & psdf["right"], True & pdf["right"])
self._check_extension(False & psdf["right"], False & pdf["right"])
self._check_extension(pd.NA & psdf["right"], pd.NA & pdf["right"])
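    # The nullable "boolean" dtype uses Kleene (three-valued) logic, which the
    # extension-dtype variants above rely on: pd.NA | True is True,
    # pd.NA | False is pd.NA, pd.NA & False is False, and pd.NA & True is pd.NA.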
def test_to_numpy(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.to_numpy(), pser.values)
def test_isin(self):
pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal")
psser = ps.from_pandas(pser)
self.assert_eq(psser.isin(["cow", "lama"]), pser.isin(["cow", "lama"]))
self.assert_eq(psser.isin(np.array(["cow", "lama"])), pser.isin(np.array(["cow", "lama"])))
self.assert_eq(psser.isin({"cow"}), pser.isin({"cow"}))
pser = pd.Series([np.int64(1), np.int32(1), 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.isin([np.int64(1)]), pser.isin([np.int64(1)]))
msg = "only list-like objects are allowed to be passed to isin()"
with self.assertRaisesRegex(TypeError, msg):
psser.isin(1)
def test_drop_duplicates(self):
pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]})
psdf = ps.from_pandas(pdf)
pser = pdf.animal
psser = psdf.animal
self.assert_eq(psser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index())
self.assert_eq(
psser.drop_duplicates(keep="last").sort_index(),
pser.drop_duplicates(keep="last").sort_index(),
)
# inplace
psser.drop_duplicates(keep=False, inplace=True)
pser.drop_duplicates(keep=False, inplace=True)
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psdf, pdf)
def test_reindex(self):
index = ["A", "B", "C", "D", "E"]
pser = pd.Series([1.0, 2.0, 3.0, 4.0, None], index=index, name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser, psser)
self.assert_eq(
pser.reindex(["A", "B"]).sort_index(),
psser.reindex(["A", "B"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "B", "2", "3"]).sort_index(),
psser.reindex(["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
psser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
)
self.assertRaises(TypeError, lambda: psser.reindex(index=123))
def test_reindex_like(self):
data = [1.0, 2.0, None]
index = pd.Index(["A", "B", "C"], name="index1")
pser = pd.Series(data=data, index=index, name="name1")
psser = ps.from_pandas(pser)
# Reindexing single Index on single Index
data2 = [3.0, None, 4.0]
index2 = pd.Index(["A", "C", "D"], name="index2")
pser2 = pd.Series(data=data2, index=index2, name="name2")
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
self.assert_eq(
(pser + 1).reindex_like(pser2).sort_index(),
(psser + 1).reindex_like(psser2).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["index3", "index4"]
)
pser2 = pd.Series(data=data2, index=index2, name="name2")
psser2 = ps.from_pandas(pser2)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
self.assertRaises(TypeError, lambda: psser.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psser2.reindex_like(psser))
# Reindexing MultiIndex on MultiIndex
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
pser = pd.Series(data=data, index=index, name="name1")
psser = ps.from_pandas(pser)
self.assert_eq(
pser.reindex_like(pser2).sort_index(),
psser.reindex_like(psser2).sort_index(),
)
# Reindexing with DataFrame
index2 = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name3", "name4"]
)
pdf = pd.DataFrame(data=data, index=index2)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pser.reindex_like(pdf).sort_index(),
psser.reindex_like(psdf).sort_index(),
)
def test_fillna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
# test considering series does not have NA/NaN values
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
psser = psdf.x.rename("y")
pser = pdf.x.rename("y")
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser.head(), pser.head())
pser = pd.Series([1, 2, 3, 4, 5, 6], name="x")
psser = ps.from_pandas(pser)
pser.loc[3] = np.nan
psser.loc[3] = np.nan
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(method="ffill"), pser.fillna(method="ffill"))
self.assert_eq(psser.fillna(method="bfill"), pser.fillna(method="bfill"))
# inplace fillna on non-nullable column
pdf = pd.DataFrame({"a": [1, 2, None], "b": [1, 2, 3]})
psdf = ps.from_pandas(pdf)
pser = pdf.b
psser = psdf.b
self.assert_eq(psser.fillna(0), pser.fillna(0))
self.assert_eq(psser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
psser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_dropna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.dropna(), pser.dropna())
pser.dropna(inplace=True)
psser.dropna(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_nunique(self):
pser = pd.Series([1, 2, 1, np.nan])
psser = ps.from_pandas(pser)
# Assert NaNs are dropped by default
nunique_result = psser.nunique()
self.assertEqual(nunique_result, 2)
self.assert_eq(nunique_result, pser.nunique())
# Assert including NaN values
nunique_result = psser.nunique(dropna=False)
self.assertEqual(nunique_result, 3)
self.assert_eq(nunique_result, pser.nunique(dropna=False))
# Assert approximate counts
self.assertEqual(ps.Series(range(100)).nunique(approx=True), 103)
self.assertEqual(ps.Series(range(100)).nunique(approx=True, rsd=0.01), 100)
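    # nunique(approx=True) delegates to Spark's approximate distinct count
    # (HyperLogLog++ based), so the result can deviate from the exact answer;
    # the assertions above pin 103 for 100 distinct values at the default rsd,
    # while rsd=0.01 (a tighter relative standard deviation) recovers 100.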
def test_value_counts(self):
# this is also containing test for Index & MultiIndex
pser = pd.Series(
[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
name="x",
)
psser = ps.from_pandas(pser)
exp = pser.value_counts()
res = psser.value_counts()
self.assertEqual(res.name, exp.name)
self.assert_eq(res, exp)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
with self.assertRaisesRegex(
NotImplementedError, "value_counts currently does not support bins"
):
psser.value_counts(bins=3)
pser.name = "index"
psser.name = "index"
self.assert_eq(psser.value_counts(), pser.value_counts())
# Series from DataFrame
pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True))
self.assert_eq(psdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True))
self.assert_eq(
psdf.a.value_counts(normalize=True, dropna=False),
pdf.a.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psdf.a.value_counts(ascending=True, dropna=False),
pdf.a.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with NaN index
pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0])
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
psser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index has NaN
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index is NaN.
# This test only available for pandas >= 0.24.
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
psser = ps.from_pandas(pser)
self.assert_eq(psser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(psser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
psser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
psser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
psser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
psser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
psser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
def test_nsmallest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
psser = ps.Series(sample_lst, name="x")
self.assert_eq(psser.nsmallest(n=3), pser.nsmallest(n=3))
self.assert_eq(psser.nsmallest(), pser.nsmallest())
self.assert_eq((psser + 1).nsmallest(), (pser + 1).nsmallest())
def test_nlargest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
psser = ps.Series(sample_lst, name="x")
self.assert_eq(psser.nlargest(n=3), pser.nlargest(n=3))
self.assert_eq(psser.nlargest(), pser.nlargest())
self.assert_eq((psser + 1).nlargest(), (pser + 1).nlargest())
def test_notnull(self):
pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(psser.notnull(), pser.notnull())
pser = self.pser
psser = self.psser
self.assert_eq(psser.notnull(), pser.notnull())
def test_all(self):
for pser in [
pd.Series([True, True], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
psser = ps.from_pandas(pser)
self.assert_eq(psser.all(), pser.all())
pser = pd.Series([1, 2, 3, 4], name="x")
psser = ps.from_pandas(pser)
self.assert_eq((psser % 2 == 0).all(), (pser % 2 == 0).all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psser.all(axis=1)
def test_any(self):
for pser in [
pd.Series([False, False], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
psser = ps.from_pandas(pser)
self.assert_eq(psser.any(), pser.any())
pser = pd.Series([1, 2, 3, 4], name="x")
psser = ps.from_pandas(pser)
self.assert_eq((psser % 2 == 0).any(), (pser % 2 == 0).any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psser.any(axis=1)
def test_reset_index(self):
pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx"))
psdf = ps.from_pandas(pdf)
pser = pdf.foo
psser = psdf.foo
self.assert_eq(psser.reset_index(), pser.reset_index())
self.assert_eq(psser.reset_index(name="values"), pser.reset_index(name="values"))
self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
# inplace
psser.reset_index(drop=True, inplace=True)
pser.reset_index(drop=True, inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_reset_index_with_default_index_types(self):
pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3))
psser = ps.from_pandas(pser)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psser.reset_index(), pser.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
# the order might be changed.
self.assert_eq(psser.reset_index().sort_index(), pser.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(
psser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index()
)
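    # The three compute.default_index_type settings behave as the comments
    # above suggest: "sequence" builds a globally sequential index (computed
    # without partitioning, so it matches pandas exactly but does not scale),
    # "distributed-sequence" is still sequential but may change the row order,
    # and "distributed" only guarantees monotonically increasing,
    # non-consecutive values, hence the weaker comparisons.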
def test_index_to_series_reset_index(self):
def check(psser, pser):
self.assert_eq(psser.reset_index(), pser.reset_index())
self.assert_eq(psser.reset_index(drop=True), pser.reset_index(drop=True))
pser.reset_index(drop=True, inplace=True)
psser.reset_index(drop=True, inplace=True)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
check(psdf.index.to_series(), pdf.index.to_series())
check(psdf.index.to_series(name="a"), pdf.index.to_series(name="a"))
check(psdf.index.to_series(name=("x", "a")), pdf.index.to_series(name=("x", "a")))
def test_sort_values(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.sort_values(), pser.sort_values())
self.assert_eq(psser.sort_values(ascending=False), pser.sort_values(ascending=False))
self.assert_eq(
psser.sort_values(na_position="first"), pser.sort_values(na_position="first")
)
self.assertRaises(ValueError, lambda: psser.sort_values(na_position="invalid"))
# inplace
# pandas raises an exception when the Series is derived from DataFrame
psser.sort_values(inplace=True)
self.assert_eq(psser, pser.sort_values())
self.assert_eq(psdf, pdf)
pser = pdf.x.copy()
psser = psdf.x.copy()
psser.sort_values(inplace=True)
pser.sort_values(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
def test_sort_index(self):
pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psser.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psser.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psser.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psser.sort_index(), pser.sort_index())
# Assert sorting descending
self.assert_eq(psser.sort_index(ascending=False), pser.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psser.sort_index(na_position="first"), pser.sort_index(na_position="first"))
# Assert sorting inplace
        # pandas sorts pdf.x by the index and updates the column only
# when the Series is derived from DataFrame.
psser.sort_index(inplace=True)
self.assert_eq(psser, pser.sort_index())
self.assert_eq(psdf, pdf)
pser = pdf.x.copy()
psser = psdf.x.copy()
psser.sort_index(inplace=True)
pser.sort_index(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
# Assert multi-indices
pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]))
self.assert_eq(psser.reset_index().sort_index(), pser.reset_index().sort_index())
def test_to_datetime(self):
pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
psser = ps.from_pandas(pser)
self.assert_eq(
pd.to_datetime(pser, infer_datetime_format=True),
ps.to_datetime(psser, infer_datetime_format=True),
)
def test_missing(self):
psser = self.psser
missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psser, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name)
):
getattr(psser, name)()
missing_properties = inspect.getmembers(
MissingPandasLikeSeries, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psser, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name)
):
getattr(psser, name)
def test_clip(self):
pser = pd.Series([0, 2, 4], index=np.random.rand(3))
psser = ps.from_pandas(pser)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psser.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psser.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psser.clip(), pser.clip())
# Assert lower only
self.assert_eq(psser.clip(1), pser.clip(1))
# Assert upper only
self.assert_eq(psser.clip(upper=3), pser.clip(upper=3))
# Assert lower and upper
self.assert_eq(psser.clip(1, 3), pser.clip(1, 3))
# Assert behavior on string values
str_psser = ps.Series(["a", "b", "c"])
self.assert_eq(str_psser.clip(1, 3), str_psser)
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser = pd.Series([1, 2])
psser = ps.from_pandas(pser)
res_psdf = psser.compare(psser)
self.assertTrue(res_psdf.empty)
self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
self.assert_eq(
pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
)
pser = pd.Series([1, 2], index=["x", "y"])
psser = ps.from_pandas(pser)
self.assert_eq(
pser.compare(pser + 1).sort_index(), psser.compare(psser + 1).sort_index()
)
else:
psser = ps.Series([1, 2])
res_psdf = psser.compare(psser)
self.assertTrue(res_psdf.empty)
self.assert_eq(res_psdf.columns, pd.Index(["self", "other"]))
expected = ps.DataFrame([[1, 2], [2, 3]], columns=["self", "other"])
self.assert_eq(expected, psser.compare(psser + 1).sort_index())
psser = ps.Series([1, 2], index=["x", "y"])
expected = ps.DataFrame([[1, 2], [2, 3]], index=["x", "y"], columns=["self", "other"])
self.assert_eq(expected, psser.compare(psser + 1).sort_index())
def test_is_unique(self):
# We can't use pandas' is_unique for comparison. pandas 0.23 ignores None
pser = pd.Series([1, 2, 2, None, None])
psser = ps.from_pandas(pser)
self.assertEqual(False, psser.is_unique)
self.assertEqual(False, (psser + 1).is_unique)
pser = pd.Series([1, None, None])
psser = ps.from_pandas(pser)
self.assertEqual(False, psser.is_unique)
self.assertEqual(False, (psser + 1).is_unique)
pser = pd.Series([1])
psser = ps.from_pandas(pser)
self.assertEqual(pser.is_unique, psser.is_unique)
self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique)
pser = pd.Series([1, 1, 1])
psser = ps.from_pandas(pser)
self.assertEqual(pser.is_unique, psser.is_unique)
self.assertEqual((pser + 1).is_unique, (psser + 1).is_unique)
def test_to_list(self):
self.assert_eq(self.psser.tolist(), self.pser.tolist())
def test_append(self):
pser1 = pd.Series([1, 2, 3], name="0")
pser2 = pd.Series([4, 5, 6], name="0")
pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
psser3 = ps.from_pandas(pser3)
self.assert_eq(psser1.append(psser2), pser1.append(pser2))
self.assert_eq(psser1.append(psser3), pser1.append(pser3))
self.assert_eq(
psser1.append(psser2, ignore_index=True), pser1.append(pser2, ignore_index=True)
)
psser1.append(psser3, verify_integrity=True)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psser1.append(psser2, verify_integrity=True)
def test_map(self):
pser = pd.Series(["cat", "dog", None, "rabbit"])
psser = ps.from_pandas(pser)
# Currently Koalas doesn't return NaN as pandas does.
        self.assert_eq(psser.map({}), pser.map({}).replace({np.nan: None}))
d = defaultdict(lambda: "abc")
self.assertTrue("abc" in repr(psser.map(d)))
self.assert_eq(psser.map(d), pser.map(d))
def tomorrow(date) -> datetime:
return date + timedelta(days=1)
pser = pd.Series([datetime(2019, 10, 24)])
psser = ps.from_pandas(pser)
self.assert_eq(psser.map(tomorrow), pser.map(tomorrow))
def test_add_prefix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), psser.add_prefix("item_"))
def test_add_suffix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), psser.add_suffix("_item"))
def test_cummin(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummin(), psser.cummin())
self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False))
self.assert_eq(pser.cummin().sum(), psser.cummin().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummin(), psser.cummin())
self.assert_eq(pser.cummin(skipna=False), psser.cummin(skipna=False))
def test_cummax(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummax(), psser.cummax())
self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False))
self.assert_eq(pser.cummax().sum(), psser.cummax().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cummax(), psser.cummax())
self.assert_eq(pser.cummax(skipna=False), psser.cummax(skipna=False))
def test_cumsum(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum(), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False))
self.assert_eq(pser.cumsum().sum(), psser.cumsum().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum(), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), psser.cumsum(skipna=False))
# bool
pser = pd.Series([True, True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumsum().astype(int), psser.cumsum())
self.assert_eq(pser.cumsum(skipna=False).astype(int), psser.cumsum(skipna=False))
def test_cumprod(self):
pser = pd.Series([1.0, None, 1.0, 4.0, 9.0])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
# with integer type
pser = pd.Series([1, 10, 1, 4, 9])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), psser.cumprod().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# including zero
pser = pd.Series([1, 2, 0, 3])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# including negative values
pser = pd.Series([1, -1, -2])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), psser.cumprod(skipna=False))
# bool
pser = pd.Series([True, True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.cumprod(), psser.cumprod())
self.assert_eq(pser.cumprod(skipna=False).astype(int), psser.cumprod(skipna=False))
def test_median(self):
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a")
def test_rank(self):
pser = pd.Series([1, 2, 3, 1], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser.rank(), psser.rank().sort_index())
self.assert_eq(pser.rank().sum(), psser.rank().sum())
self.assert_eq(pser.rank(ascending=False), psser.rank(ascending=False).sort_index())
self.assert_eq(pser.rank(method="min"), psser.rank(method="min").sort_index())
self.assert_eq(pser.rank(method="max"), psser.rank(method="max").sort_index())
self.assert_eq(pser.rank(method="first"), psser.rank(method="first").sort_index())
self.assert_eq(pser.rank(method="dense"), psser.rank(method="dense").sort_index())
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psser.rank(method="nothing")
def test_round(self):
pser = pd.Series([0.028208, 0.038683, 0.877076], name="x")
psser = ps.from_pandas(pser)
self.assert_eq(pser.round(2), psser.round(2))
msg = "decimals must be an integer"
with self.assertRaisesRegex(TypeError, msg):
psser.round(1.5)
def test_quantile(self):
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(psser.quantile(0.5), pser.quantile(0.5))
self.assert_eq(psser.quantile([0.25, 0.5, 0.75]), pser.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
ps.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"])
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).quantile()
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).quantile([0.25, 0.5, 0.75])
def test_idxmax(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(psser.idxmax(skipna=False), pser.idxmax(skipna=False))
psser = ps.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
psser.idxmax()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
psser = ps.Series(pser)
self.assertEqual(psser.idxmax(), pser.idxmax())
self.assertEqual(repr(psser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))
def test_idxmin(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(psser.idxmin(skipna=False), pser.idxmin(skipna=False))
psser = ps.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
psser.idxmin()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
psser = ps.Series(pser)
self.assertEqual(psser.idxmin(), pser.idxmin())
self.assertEqual(repr(psser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))
def test_shift(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.shift(2), pser.shift(2))
self.assert_eq(psser.shift().shift(-1), pser.shift().shift(-1))
self.assert_eq(psser.shift().sum(), pser.shift().sum())
if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
self.assert_eq(psser.shift(periods=2), pser.shift(periods=2))
else:
self.assert_eq(
psser.shift(periods=2, fill_value=0), pser.shift(periods=2, fill_value=0)
)
with self.assertRaisesRegex(TypeError, "periods should be an int; however"):
psser.shift(periods=1.5)
def test_diff(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.diff(2), pser.diff(2))
self.assert_eq(psser.diff().diff(-1), pser.diff().diff(-1))
self.assert_eq(psser.diff().sum(), pser.diff().sum())
def _test_numeric_astype(self, pser):
psser = ps.Series(pser)
self.assert_eq(psser.astype(int), pser.astype(int))
self.assert_eq(psser.astype(np.int), pser.astype(np.int))
self.assert_eq(psser.astype(np.int8), pser.astype(np.int8))
self.assert_eq(psser.astype(np.int16), pser.astype(np.int16))
self.assert_eq(psser.astype(np.int32), pser.astype(np.int32))
self.assert_eq(psser.astype(np.int64), pser.astype(np.int64))
self.assert_eq(psser.astype(np.byte), pser.astype(np.byte))
self.assert_eq(psser.astype("int"), pser.astype("int"))
self.assert_eq(psser.astype("int8"), pser.astype("int8"))
self.assert_eq(psser.astype("int16"), pser.astype("int16"))
self.assert_eq(psser.astype("int32"), pser.astype("int32"))
self.assert_eq(psser.astype("int64"), pser.astype("int64"))
self.assert_eq(psser.astype("b"), pser.astype("b"))
self.assert_eq(psser.astype("byte"), pser.astype("byte"))
self.assert_eq(psser.astype("i"), pser.astype("i"))
self.assert_eq(psser.astype("long"), pser.astype("long"))
self.assert_eq(psser.astype("short"), pser.astype("short"))
self.assert_eq(psser.astype(np.float), pser.astype(np.float))
self.assert_eq(psser.astype(np.float32), pser.astype(np.float32))
self.assert_eq(psser.astype(np.float64), pser.astype(np.float64))
self.assert_eq(psser.astype("float"), pser.astype("float"))
self.assert_eq(psser.astype("float32"), pser.astype("float32"))
self.assert_eq(psser.astype("float64"), pser.astype("float64"))
self.assert_eq(psser.astype("double"), pser.astype("double"))
self.assert_eq(psser.astype("f"), pser.astype("f"))
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype("bool"), pser.astype("bool"))
self.assert_eq(psser.astype("?"), pser.astype("?"))
self.assert_eq(psser.astype(np.unicode_), pser.astype(np.unicode_))
self.assert_eq(psser.astype("str"), pser.astype("str"))
self.assert_eq(psser.astype("U"), pser.astype("U"))
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
self._check_extension(psser.astype("Int8"), pser.astype("Int8"))
self._check_extension(psser.astype("Int16"), pser.astype("Int16"))
self._check_extension(psser.astype("Int32"), pser.astype("Int32"))
self._check_extension(psser.astype("Int64"), pser.astype("Int64"))
self._check_extension(psser.astype(Int8Dtype()), pser.astype(Int8Dtype()))
self._check_extension(psser.astype(Int16Dtype()), pser.astype(Int16Dtype()))
self._check_extension(psser.astype(Int32Dtype()), pser.astype(Int32Dtype()))
self._check_extension(psser.astype(Int64Dtype()), pser.astype(Int64Dtype()))
if extension_object_dtypes_available:
from pandas import StringDtype
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
else:
self._check_extension(
psser.astype("string"),
pd.Series(["10", "20", "15", "30", "45"], name="x", dtype="string"),
)
self._check_extension(
psser.astype(StringDtype()),
pd.Series(["10", "20", "15", "30", "45"], name="x", dtype=StringDtype()),
)
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
self._check_extension(psser.astype("Float32"), pser.astype("Float32"))
self._check_extension(psser.astype("Float64"), pser.astype("Float64"))
self._check_extension(psser.astype(Float32Dtype()), pser.astype(Float32Dtype()))
self._check_extension(psser.astype(Float64Dtype()), pser.astype(Float64Dtype()))
def test_astype(self):
psers = [pd.Series([10, 20, 15, 30, 45], name="x")]
if extension_dtypes_available:
psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Int64"))
if extension_float_dtypes_available:
psers.append(pd.Series([10, 20, 15, 30, 45], name="x", dtype="Float64"))
for pser in psers:
self._test_numeric_astype(pser)
pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype(str), pser.astype(str))
pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
if LooseVersion("1.1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.1.4"):
# a pandas bug: https://github.com/databricks/koalas/pull/1818#issuecomment-703961980
self.assert_eq(psser.astype(str).tolist(), ["hi", "hi ", " ", " \t", "", "None"])
else:
self.assert_eq(psser.astype(str), pser.astype(str))
self.assert_eq(psser.str.strip().astype(bool), pser.str.strip().astype(bool))
if extension_object_dtypes_available:
from pandas import StringDtype
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
pser = pd.Series([True, False, None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(bool), pser.astype(bool))
self.assert_eq(psser.astype(str), pser.astype(str))
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
self._check_extension(psser.astype("boolean"), pser.astype("boolean"))
self._check_extension(psser.astype(BooleanDtype()), pser.astype(BooleanDtype()))
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self._check_extension(psser.astype("string"), pser.astype("string"))
self._check_extension(psser.astype(StringDtype()), pser.astype(StringDtype()))
else:
self._check_extension(
psser.astype("string"),
pd.Series(["True", "False", None], name="x", dtype="string"),
)
self._check_extension(
psser.astype(StringDtype()),
pd.Series(["True", "False", None], name="x", dtype=StringDtype()),
)
pser = pd.Series(["2020-10-27 00:00:01", None], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.astype(np.datetime64), pser.astype(np.datetime64))
self.assert_eq(psser.astype("datetime64[ns]"), pser.astype("datetime64[ns]"))
self.assert_eq(psser.astype("M"), pser.astype("M"))
self.assert_eq(psser.astype("M").astype(str), pser.astype("M").astype(str))
        # The test below is commented out because pandas returns `NaT` or `nan` randomly
# self.assert_eq(
# psser.astype("M").dt.date.astype(str), pser.astype("M").dt.date.astype(str)
# )
if extension_object_dtypes_available:
from pandas import StringDtype
self._check_extension(
psser.astype("M").astype("string"), pser.astype("M").astype("string")
)
self._check_extension(
psser.astype("M").astype(StringDtype()), pser.astype("M").astype(StringDtype())
)
with self.assertRaisesRegex(TypeError, "not understood"):
psser.astype("int63")
def test_aggregate(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
msg = "func must be a string or list of strings"
with self.assertRaisesRegex(TypeError, msg):
psser.aggregate({"x": ["min", "max"]})
msg = (
"If the given function is a list, it " "should only contains function names as strings."
)
with self.assertRaisesRegex(ValueError, msg):
psser.aggregate(["min", max])
def test_drop(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.drop(1), pser.drop(1))
self.assert_eq(psser.drop([1, 4]), pser.drop([1, 4]))
msg = "Need to specify at least one of 'labels' or 'index'"
with self.assertRaisesRegex(ValueError, msg):
psser.drop()
self.assertRaises(KeyError, lambda: psser.drop((0, 1)))
# For MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.drop("lama"), pser.drop("lama"))
self.assert_eq(psser.drop(labels="weight", level=1), pser.drop(labels="weight", level=1))
self.assert_eq(psser.drop(("lama", "weight")), pser.drop(("lama", "weight")))
self.assert_eq(
psser.drop([("lama", "speed"), ("falcon", "weight")]),
pser.drop([("lama", "speed"), ("falcon", "weight")]),
)
self.assert_eq(psser.drop({"lama": "speed"}), pser.drop({"lama": "speed"}))
msg = "'level' should be less than the number of indexes"
with self.assertRaisesRegex(ValueError, msg):
psser.drop(labels="weight", level=2)
msg = (
"If the given index is a list, it "
"should only contains names as all tuples or all non tuples "
"that contain index names"
)
with self.assertRaisesRegex(ValueError, msg):
psser.drop(["lama", ["cow", "falcon"]])
msg = "Cannot specify both 'labels' and 'index'"
with self.assertRaisesRegex(ValueError, msg):
psser.drop("lama", index="cow")
msg = r"'Key length \(2\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psser.drop(("lama", "speed", "x"))
def test_pop(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
msg = r"'Key length \(3\) exceeds index depth \(2\)'"
with self.assertRaisesRegex(KeyError, msg):
psser.pop(("lama", "speed", "x"))
def test_replace(self):
pser = pd.Series([10, 20, 15, 30, np.nan], name="x")
psser = ps.Series(pser)
self.assert_eq(psser.replace(), pser.replace())
self.assert_eq(psser.replace({}), pser.replace({}))
self.assert_eq(psser.replace(np.nan, 45), pser.replace(np.nan, 45))
self.assert_eq(psser.replace([10, 15], 45), pser.replace([10, 15], 45))
self.assert_eq(psser.replace((10, 15), 45), pser.replace((10, 15), 45))
self.assert_eq(psser.replace([10, 15], [45, 50]), pser.replace([10, 15], [45, 50]))
self.assert_eq(psser.replace((10, 15), (45, 50)), pser.replace((10, 15), (45, 50)))
msg = "'to_replace' should be one of str, list, tuple, dict, int, float"
with self.assertRaisesRegex(TypeError, msg):
psser.replace(ps.range(5))
msg = "Replacement lists must match in length. Expecting 3 got 2"
with self.assertRaisesRegex(ValueError, msg):
psser.replace([10, 20, 30], [1, 2])
msg = "replace currently not support for regex"
with self.assertRaisesRegex(NotImplementedError, msg):
psser.replace(r"^1.$", regex=True)
def test_xs(self):
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed")))
def test_duplicates(self):
psers = {
"test on texts": pd.Series(
["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
),
"test on numbers": pd.Series([1, 1, 2, 4, 3]),
}
keeps = ["first", "last", False]
for (msg, pser), keep in product(psers.items(), keeps):
with self.subTest(msg, keep=keep):
psser = ps.Series(pser)
self.assert_eq(
pser.drop_duplicates(keep=keep).sort_values(),
psser.drop_duplicates(keep=keep).sort_values(),
)
def test_update(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
psser = ps.Series(pser)
msg = "'other' must be a Series"
with self.assertRaisesRegex(TypeError, msg):
psser.update(10)
def test_where(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
psser1 = ps.from_pandas(pser1)
self.assert_eq(pser1.where(pser1 > 3), psser1.where(psser1 > 3).sort_index())
def test_mask(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
psser1 = ps.from_pandas(pser1)
self.assert_eq(pser1.mask(pser1 > 3), psser1.mask(psser1 > 3).sort_index())
def test_truncate(self):
pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
psser1 = ps.Series(pser1)
pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
psser2 = ps.Series(pser2)
self.assert_eq(psser1.truncate(), pser1.truncate())
self.assert_eq(psser1.truncate(before=2), pser1.truncate(before=2))
self.assert_eq(psser1.truncate(after=5), pser1.truncate(after=5))
self.assert_eq(psser1.truncate(copy=False), pser1.truncate(copy=False))
self.assert_eq(psser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psser2.truncate(4, 6), pser2.truncate(4, 6))
self.assert_eq(psser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
else:
expected_psser = ps.Series([20, 30, 40], index=[6, 5, 4])
self.assert_eq(psser2.truncate(4, 6), expected_psser)
self.assert_eq(psser2.truncate(4, 6, copy=False), expected_psser)
psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psser.truncate()
psser = ps.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
msg = "Truncate: 2 must be after 5"
with self.assertRaisesRegex(ValueError, msg):
psser.truncate(5, 2)
def test_getitem(self):
pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
psser = ps.Series(pser)
self.assert_eq(psser["A"], pser["A"])
self.assert_eq(psser["B"], pser["B"])
self.assert_eq(psser[psser > 15], pser[pser > 15])
# for MultiIndex
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
psser = ps.Series(pser)
self.assert_eq(psser["a"], pser["a"])
self.assert_eq(psser["a", "lama"], pser["a", "lama"])
self.assert_eq(psser[psser > 1.5], pser[pser > 1.5])
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psser[("a", "lama", "speed", "x")]
def test_keys(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.keys(), pser.keys())
def test_index(self):
# to check setting name of Index properly.
idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
psser = ps.from_pandas(pser)
psser.name = "koalas"
pser.name = "koalas"
self.assert_eq(psser.index.name, pser.index.name)
# for check setting names of MultiIndex properly.
psser.names = ["hello", "koalas"]
pser.names = ["hello", "koalas"]
self.assert_eq(psser.index.names, pser.index.names)
def test_pct_change(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(psser.pct_change().sum(), pser.pct_change().sum(), almost=True)
self.assert_eq(psser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(psser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(psser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(psser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
def test_axes(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
psser = ps.from_pandas(pser)
self.assert_eq(psser.axes, pser.axes)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.axes, pser.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pser = pd.Series([sparse_vector])
psser = ps.from_pandas(pser)
self.assert_eq(psser, pser)
def test_repeat(self):
pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(psser.repeat(3).sort_index(), pser.repeat(3).sort_index())
self.assert_eq(psser.repeat(0).sort_index(), pser.repeat(0).sort_index())
self.assertRaises(ValueError, lambda: psser.repeat(-1))
self.assertRaises(TypeError, lambda: psser.repeat("abc"))
pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.repeat(psdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())
def test_take(self):
pser = pd.Series([100, 200, 300, 400, 500], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values())
self.assert_eq(
psser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values()
)
self.assert_eq(psser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values())
self.assert_eq(
psser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values()
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psser.take(1))
self.assertRaises(TypeError, lambda: psser.take("1"))
self.assertRaises(TypeError, lambda: psser.take({1, 2}))
self.assertRaises(TypeError, lambda: psser.take({1: None, 2: None}))
def test_divmod(self):
pser = pd.Series([100, None, 300, None, 500], name="Koalas")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
kdiv, kmod = psser.divmod(-100)
pdiv, pmod = pser.divmod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = psser.divmod(100)
pdiv, pmod = pser.divmod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
kdiv, kmod = psser.divmod(-100)
pdiv, pmod = pser.floordiv(-100), pser.mod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = psser.divmod(100)
pdiv, pmod = pser.floordiv(100), pser.mod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
def test_rdivmod(self):
pser = pd.Series([100, None, 300, None, 500])
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
krdiv, krmod = psser.rdivmod(-100)
prdiv, prmod = pser.rdivmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = psser.rdivmod(100)
prdiv, prmod = pser.rdivmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
krdiv, krmod = psser.rdivmod(-100)
prdiv, prmod = pser.rfloordiv(-100), pser.rmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = psser.rdivmod(100)
prdiv, prmod = pser.rfloordiv(100), pser.rmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.mod(-150), pser.mod(-150))
self.assert_eq(psser.mod(0), pser.mod(0))
self.assert_eq(psser.mod(150), pser.mod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.mod(psdf.b), pdf.a.mod(pdf.b))
def test_mode(self):
pser = pd.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(psser.mode(), pser.mode())
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `dropna` argument is added in pandas 0.24.
self.assert_eq(
psser.mode(dropna=False).sort_values().reset_index(drop=True),
pser.mode(dropna=False).sort_values().reset_index(drop=True),
)
pser.name = "x"
psser = ps.from_pandas(pser)
self.assert_eq(psser.mode(), pser.mode())
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `dropna` argument is added in pandas 0.24.
self.assert_eq(
psser.mode(dropna=False).sort_values().reset_index(drop=True),
pser.mode(dropna=False).sort_values().reset_index(drop=True),
)
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.rmod(-150), pser.rmod(-150))
self.assert_eq(psser.rmod(0), pser.rmod(0))
self.assert_eq(psser.rmod(150), pser.rmod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.rmod(psdf.b), pdf.a.rmod(pdf.b))
def test_asof(self):
pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(psser.asof(20), pser.asof(20))
self.assert_eq(psser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
self.assert_eq(psser.asof(100), pser.asof(100))
self.assert_eq(repr(psser.asof(-100)), repr(pser.asof(-100)))
self.assert_eq(psser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())
# where cannot be an Index, Series or a DataFrame
self.assertRaises(ValueError, lambda: psser.asof(ps.Index([-100, 100])))
self.assertRaises(ValueError, lambda: psser.asof(ps.Series([-100, 100])))
self.assertRaises(ValueError, lambda: psser.asof(ps.DataFrame({"A": [1, 2, 3]})))
# asof is not supported for a MultiIndex
pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
psser = ps.from_pandas(pser)
self.assertRaises(ValueError, lambda: psser.asof(20))
# asof requires a sorted index (More precisely, should be a monotonic increasing)
psser = ps.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
self.assertRaises(ValueError, lambda: psser.asof(20))
psser = ps.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
self.assertRaises(ValueError, lambda: psser.asof(20))
pidx = pd.DatetimeIndex(["2013-12-31", "2014-01-02", "2014-01-03"])
pser = pd.Series([1, 2, np.nan], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.asof("2014-01-01"), pser.asof("2014-01-01"))
self.assert_eq(psser.asof("2014-01-02"), pser.asof("2014-01-02"))
self.assert_eq(repr(psser.asof("1999-01-02")), repr(pser.asof("1999-01-02")))
def test_squeeze(self):
# Single value
pser = pd.Series([90])
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Single value with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
pser = pd.Series([90], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Multiple values
pser = pd.Series([90, 91, 85])
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
# Multiple values with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series([90, 91, 85], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(psser.squeeze(), pser.squeeze())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pser = pd.Series(["a", "b", "c", "d"], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.swaplevel(), psser.swaplevel())
self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pser = pd.Series(["a", "b", "c", "d"], index=pidx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.swaplevel(), psser.swaplevel())
self.assert_eq(pser.swaplevel(0, 1), psser.swaplevel(0, 1))
self.assert_eq(pser.swaplevel(0, 2), psser.swaplevel(0, 2))
self.assert_eq(pser.swaplevel(1, 2), psser.swaplevel(1, 2))
self.assert_eq(pser.swaplevel(1, 1), psser.swaplevel(1, 1))
self.assert_eq(pser.swaplevel(-1, -2), psser.swaplevel(-1, -2))
self.assert_eq(pser.swaplevel("number", "color"), psser.swaplevel("number", "color"))
self.assert_eq(pser.swaplevel("number", "size"), psser.swaplevel("number", "size"))
self.assert_eq(pser.swaplevel("color", "size"), psser.swaplevel("color", "size"))
# Error conditions
self.assertRaises(AssertionError, lambda: ps.Series([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psser.swaplevel(0, 9))
self.assertRaises(KeyError, lambda: psser.swaplevel("not_number", "color"))
self.assertRaises(AssertionError, lambda: psser.swaplevel(copy=False))
def test_swapaxes(self):
pser = pd.Series([1, 2, 3], index=["x", "y", "z"], name="ser")
psser = ps.from_pandas(pser)
self.assert_eq(psser.swapaxes(0, 0), pser.swapaxes(0, 0))
self.assert_eq(psser.swapaxes("index", "index"), pser.swapaxes("index", "index"))
self.assert_eq((psser + 1).swapaxes(0, 0), (pser + 1).swapaxes(0, 0))
self.assertRaises(AssertionError, lambda: psser.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psser.swapaxes(0, 1))
self.assertRaises(ValueError, lambda: psser.swapaxes("index", "columns"))
def test_div_zero_and_nan(self):
pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.div(0), psser.div(0))
self.assert_eq(pser.truediv(0), psser.truediv(0))
self.assert_eq(pser / 0, psser / 0)
self.assert_eq(pser.div(np.nan), psser.div(np.nan))
self.assert_eq(pser.truediv(np.nan), psser.truediv(np.nan))
self.assert_eq(pser / np.nan, psser / np.nan)
# floordiv has different behavior in pandas > 1.0.0 when divide by 0
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(pser.floordiv(0), psser.floordiv(0))
self.assert_eq(pser // 0, psser // 0)
else:
result = pd.Series(
[np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
)
self.assert_eq(psser.floordiv(0), result)
self.assert_eq(psser // 0, result)
self.assert_eq(pser.floordiv(np.nan), psser.floordiv(np.nan))
def test_mad(self):
pser = pd.Series([1, 2, 3, 4], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([1, 2, 3, 4, 5], name="Koalas")
pser.index = pmidx
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas")
pser.index = pmidx
psser = ps.from_pandas(pser)
self.assert_eq(pser.mad(), psser.mad())
def test_to_frame(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), psser.to_frame(name="a"))
def test_shape(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.shape, psser.shape)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(pser.shape, psser.shape)
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
psser = ps.from_pandas(pser)
# `to_markdown()` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assertRaises(NotImplementedError, lambda: psser.to_markdown())
else:
self.assert_eq(pser.to_markdown(), psser.to_markdown())
def test_unstack(self):
pser = pd.Series(
[10, -2, 4, 7],
index=pd.MultiIndex.from_tuples(
[("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
names=["A", "B", "C"],
),
)
psser = ps.from_pandas(pser)
levels = [-3, -2, -1, 0, 1, 2]
for level in levels:
pandas_result = pser.unstack(level=level)
pandas_on_spark_result = psser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, pandas_on_spark_result)
self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)
# non-numeric datatypes
pser = pd.Series(
list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
)
psser = ps.from_pandas(pser)
levels = [-2, -1, 0, 1]
for level in levels:
pandas_result = pser.unstack(level=level)
pandas_on_spark_result = psser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, pandas_on_spark_result)
self.assert_eq(pandas_result.index.names, pandas_on_spark_result.index.names)
self.assert_eq(pandas_result.columns.names, pandas_on_spark_result.columns.names)
# Exceeding the range of level
self.assertRaises(IndexError, lambda: psser.unstack(level=3))
self.assertRaises(IndexError, lambda: psser.unstack(level=-4))
# Only support for MultiIndex
psser = ps.Series([10, -2, 4, 7])
self.assertRaises(ValueError, lambda: psser.unstack())
def test_item(self):
psser = ps.Series([10, 20])
self.assertRaises(ValueError, lambda: psser.item())
def test_filter(self):
pser = pd.Series([0, 1, 2], index=["one", "two", "three"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.filter(items=["one", "three"]), psser.filter(items=["one", "three"]))
self.assert_eq(pser.filter(regex="e$"), psser.filter(regex="e$"))
self.assert_eq(pser.filter(like="hre"), psser.filter(like="hre"))
with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
psser.filter(like="hre", axis=1)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
pser = pd.Series([0, 1, 2], index=midx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.filter(items=[("one", "x"), ("three", "z")]),
psser.filter(items=[("one", "x"), ("three", "z")]),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psser.filter(items=[["one", "x"], ("three", "z")])
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psser.filter(items=[(), ("three", "z")])
def test_abs(self):
pser = pd.Series([-2, -1, 0, 1])
psser = ps.from_pandas(pser)
self.assert_eq(abs(psser), abs(pser))
self.assert_eq(np.abs(psser), np.abs(pser))
def test_bfill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.bfill(), pser.bfill())
self.assert_eq(psser.bfill()[0], pser.bfill()[0])
psser.bfill(inplace=True)
pser.bfill(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psser[0], pser[0])
self.assert_eq(psdf, pdf)
def test_ffill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
self.assert_eq(psser.ffill(), pser.ffill())
self.assert_eq(psser.ffill()[4], pser.ffill()[4])
psser.ffill(inplace=True)
pser.ffill(inplace=True)
self.assert_eq(psser, pser)
self.assert_eq(psser[4], pser[4])
self.assert_eq(psdf, pdf)
def test_iteritems(self):
pser = pd.Series(["A", "B", "C"])
psser = ps.from_pandas(pser)
for (p_name, p_items), (k_name, k_items) in zip(pser.iteritems(), psser.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_droplevel(self):
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples(
[("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
names=["level_1", "level_2", "level_3"],
),
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.droplevel(0), psser.droplevel(0))
self.assert_eq(pser.droplevel("level_1"), psser.droplevel("level_1"))
self.assert_eq(pser.droplevel(-1), psser.droplevel(-1))
self.assert_eq(pser.droplevel([0]), psser.droplevel([0]))
self.assert_eq(pser.droplevel(["level_1"]), psser.droplevel(["level_1"]))
self.assert_eq(pser.droplevel((0,)), psser.droplevel((0,)))
self.assert_eq(pser.droplevel(("level_1",)), psser.droplevel(("level_1",)))
self.assert_eq(pser.droplevel([0, 2]), psser.droplevel([0, 2]))
self.assert_eq(
pser.droplevel(["level_1", "level_3"]), psser.droplevel(["level_1", "level_3"])
)
self.assert_eq(pser.droplevel((1, 2)), psser.droplevel((1, 2)))
self.assert_eq(
pser.droplevel(("level_2", "level_3")), psser.droplevel(("level_2", "level_3"))
)
with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
psser.droplevel({0, 1, 2})
with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
psser.droplevel(["level_1", "level_100"])
with self.assertRaisesRegex(
IndexError, "Too many levels: Index has only 3 levels, not 11"
):
psser.droplevel(10)
with self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 3 levels, -10 is not a valid level number",
):
psser.droplevel(-10)
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left.",
):
psser.droplevel([0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 5 levels from an index with 3 levels: "
"at least one level must be left.",
):
psser.droplevel([1, 1, 1, 1, 1])
# Tupled names
pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
psser = ps.from_pandas(pser)
self.assert_eq(
pser.droplevel([("a", "1"), ("c", "3")]), psser.droplevel([("a", "1"), ("c", "3")])
)
def test_dot(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
psdf = ps.from_pandas(pdf)
self.assert_eq((psdf["b"] * 10).dot(psdf["a"]), (pdf["b"] * 10).dot(pdf["a"]))
self.assert_eq((psdf["b"] * 10).dot(psdf), (pdf["b"] * 10).dot(pdf))
self.assert_eq((psdf["b"] * 10).dot(psdf + 1), (pdf["b"] * 10).dot(pdf + 1))
def test_tail(self):
pser = pd.Series(range(1000), name="Koalas")
psser = ps.from_pandas(pser)
self.assert_eq(pser.tail(), psser.tail())
self.assert_eq(pser.tail(10), psser.tail(10))
self.assert_eq(pser.tail(-990), psser.tail(-990))
self.assert_eq(pser.tail(0), psser.tail(0))
self.assert_eq(pser.tail(1001), psser.tail(1001))
self.assert_eq(pser.tail(-1001), psser.tail(-1001))
self.assert_eq((pser + 1).tail(), (psser + 1).tail())
self.assert_eq((pser + 1).tail(10), (psser + 1).tail(10))
self.assert_eq((pser + 1).tail(-990), (psser + 1).tail(-990))
self.assert_eq((pser + 1).tail(0), (psser + 1).tail(0))
self.assert_eq((pser + 1).tail(1001), (psser + 1).tail(1001))
self.assert_eq((pser + 1).tail(-1001), (psser + 1).tail(-1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psser.tail("10")
def test_product(self):
pser = pd.Series([10, 20, 30, 40, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Containing NA values
pser = pd.Series([10, np.nan, 30, np.nan, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod(), almost=True)
# All-NA values
pser = pd.Series([np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# Boolean Series
pser = pd.Series([True, True, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
pser = pd.Series([False, False, False])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(), psser.prod())
# With `min_count` parameter
pser = pd.Series([10, 20, 30, 40, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=5), psser.prod(min_count=5))
self.assert_eq(pser.prod(min_count=6), psser.prod(min_count=6))
pser = pd.Series([10, np.nan, 30, np.nan, 50])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=3), psser.prod(min_count=3), almost=True)
self.assert_eq(pser.prod(min_count=4), psser.prod(min_count=4))
pser = pd.Series([np.nan, np.nan, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.prod(min_count=1), psser.prod(min_count=1))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
ps.Series(["a", "b", "c"]).prod()
with self.assertRaisesRegex(
TypeError, "Could not convert datetime64\\[ns\\] \\(timestamp\\) to numeric"
):
ps.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()
def test_hasnans(self):
# BooleanType
pser = pd.Series([True, False, True, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
pser = pd.Series([True, False, np.nan, True])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
# TimestampType
pser = pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
pser = pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")])
psser = ps.from_pandas(pser)
self.assert_eq(pser.hasnans, psser.hasnans)
def test_last_valid_index(self):
pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
# MultiIndex columns
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser.index = midx
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), psser.last_valid_index())
def test_first_valid_index(self):
# Empty Series
pser = pd.Series([])
psser = ps.from_pandas(pser)
self.assert_eq(pser.first_valid_index(), psser.first_valid_index())
def test_factorize(self):
pser = pd.Series(["a", "b", "a", "b"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([5, 1, 5, 1])
psser = ps.from_pandas(pser)
pcodes, puniques = (pser + 1).factorize(sort=True)
kcodes, kuniques = (psser + 1).factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(["a", "b", "a", "b"], name="ser", index=["w", "x", "y", "z"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(
["a", "b", "a", "b"], index=pd.MultiIndex.from_arrays([[4, 3, 2, 1], [1, 2, 3, 4]])
)
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
#
# Deals with None and np.nan
#
pser = pd.Series(["a", "b", "a", np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([1, None, 3, 2, 1])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series(["a", None, "a"])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True)
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pser = pd.Series([None, np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes, kcodes.to_list())
# pandas: Float64Index([], dtype='float64')
self.assert_eq(pd.Index([]), kuniques)
pser = pd.Series([np.nan, np.nan])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(pcodes, kcodes.to_list())
# pandas: Float64Index([], dtype='float64')
self.assert_eq(pd.Index([]), kuniques)
#
# Deals with na_sentinel
#
# pandas >= 1.1.2 support na_sentinel=None
# pandas >= 0.24 support na_sentinel not to be -1
#
pd_below_1_1_2 = LooseVersion(pd.__version__) < LooseVersion("1.1.2")
pd_below_0_24 = LooseVersion(pd.__version__) < LooseVersion("0.24")
pser = pd.Series(["a", "b", "a", np.nan, None])
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize(sort=True, na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq([0, 1, 0, -2, -2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
pcodes, puniques = pser.factorize(sort=True, na_sentinel=2)
kcodes, kuniques = psser.factorize(na_sentinel=2)
self.assert_eq([0, 1, 0, 2, 2] if pd_below_0_24 else pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
if not pd_below_1_1_2:
pcodes, puniques = pser.factorize(sort=True, na_sentinel=None)
kcodes, kuniques = psser.factorize(na_sentinel=None)
self.assert_eq(pcodes.tolist(), kcodes.to_list())
# puniques is Index(['a', 'b', nan], dtype='object')
self.assert_eq(ps.Index(["a", "b", None]), kuniques)
psser = ps.Series([1, 2, np.nan, 4, 5]) # Arrow takes np.nan as null
psser.loc[3] = np.nan # Spark takes np.nan as NaN
kcodes, kuniques = psser.factorize(na_sentinel=None)
pcodes, puniques = psser.to_pandas().factorize(sort=True, na_sentinel=None)
self.assert_eq(pcodes.tolist(), kcodes.to_list())
self.assert_eq(puniques, kuniques)
def test_pad(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.pad(), psser.pad())
# Test `inplace=True`
pser.pad(inplace=True)
psser.pad(inplace=True)
self.assert_eq(pser, psser)
else:
expected = ps.Series([np.nan, 2, 3, 4, 4, 6], name="x")
self.assert_eq(expected, psser.pad())
# Test `inplace=True`
psser.pad(inplace=True)
self.assert_eq(expected, psser)
def test_explode(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode(), almost=True)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode(), almost=True)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
psser = ps.from_pandas(pser)
self.assert_eq(pser.explode(), psser.explode())
else:
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
psser = ps.from_pandas(pser)
expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
self.assert_eq(psser.explode(), expected)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
psser = ps.from_pandas(pser)
expected = pd.Series(
[1.0, 2.0, 3.0, None, None, 3.0, 4.0],
index=pd.MultiIndex.from_tuples(
[
("a", "w"),
("a", "w"),
("a", "w"),
("b", "x"),
("c", "y"),
("d", "z"),
("d", "z"),
]
),
)
self.assert_eq(psser.explode(), expected)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
psser = ps.from_pandas(pser)
expected = pser
self.assert_eq(psser.explode(), expected)
def test_argsort(self):
# Without null values
pser = pd.Series([0, -100, 50, 100, 20], index=["A", "B", "C", "D", "E"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# With name
pser.name = "Koalas"
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# Series from Index
pidx = pd.Index([4.0, -6.0, 2.0, -100.0, 11.0, 20.0, 1.0, -99.0])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from Index with name
pidx.name = "Koalas"
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from DataFrame
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
# With null values
pser = pd.Series([0, -100, np.nan, 100, np.nan], index=["A", "B", "C", "D", "E"])
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# MultiIndex with null values
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# With name with null values
pser.name = "Koalas"
psser = ps.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), psser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-psser).argsort().sort_index())
# Series from Index with null values
pidx = pd.Index([4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0])
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from Index with name with null values
pidx.name = "Koalas"
psidx = ps.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), psidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-psidx.to_series()).argsort().sort_index()
)
# Series from DataFrame with null values
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), psdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-psdf.A).argsort().sort_index())
def test_argmin_argmax(self):
pser = pd.Series(
{
"Corn Flakes": 100.0,
"Almond Delight": 110.0,
"Cinnamon Toast Crunch": 120.0,
"Cocoa Puff": 110.0,
"Expensive Flakes": 120.0,
"Cheap Flakes": 100.0,
},
name="Koalas",
)
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(pser.argmin(), psser.argmin())
self.assert_eq(pser.argmax(), psser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.argmin(), psser.argmin())
self.assert_eq(pser.argmax(), psser.argmax())
# Null Series
self.assert_eq(pd.Series([np.nan]).argmin(), ps.Series([np.nan]).argmin())
self.assert_eq(pd.Series([np.nan]).argmax(), ps.Series([np.nan]).argmax())
else:
self.assert_eq(pser.values.argmin(), psser.argmin())
self.assert_eq(pser.values.argmax(), psser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
psser = ps.from_pandas(pser)
self.assert_eq(pser.values.argmin(), psser.argmin())
self.assert_eq(pser.values.argmax(), psser.argmax())
# Null Series
self.assert_eq(-1, ps.Series([np.nan]).argmin())
self.assert_eq(-1, ps.Series([np.nan]).argmax())
with self.assertRaisesRegex(ValueError, "attempt to get argmin of an empty sequence"):
ps.Series([]).argmin()
with self.assertRaisesRegex(ValueError, "attempt to get argmax of an empty sequence"):
ps.Series([]).argmax()
def test_backfill(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
psser = ps.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.backfill(), psser.backfill())
# Test `inplace=True`
pser.backfill(inplace=True)
psser.backfill(inplace=True)
self.assert_eq(pser, psser)
else:
expected = ps.Series([2.0, 2.0, 3.0, 4.0, 6.0, 6.0], name="x")
self.assert_eq(expected, psser.backfill())
# Test `inplace=True`
psser.backfill(inplace=True)
self.assert_eq(expected, psser)
def test_align(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0]:
psser_l, psser_r = psdf.a.align(psdf.b, join=join, axis=axis)
pser_l, pser_r = pdf.a.align(pdf.b, join=join, axis=axis)
self.assert_eq(psser_l, pser_l)
self.assert_eq(psser_r, pser_r)
psser_l, psdf_r = psdf.b.align(psdf[["b", "a"]], join=join, axis=axis)
pser_l, pdf_r = pdf.b.align(pdf[["b", "a"]], join=join, axis=axis)
self.assert_eq(psser_l, pser_l)
self.assert_eq(psdf_r, pdf_r)
self.assertRaises(ValueError, lambda: psdf.a.align(psdf.b, axis=1))
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
self.assert_eq(pser.pow(np.nan), psser.pow(np.nan))
self.assert_eq(pser ** np.nan, psser ** np.nan)
self.assert_eq(pser.rpow(np.nan), psser.rpow(np.nan))
self.assert_eq(1 ** pser, 1 ** psser)
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pser = pd.Series([1, 2, 3, 4], index=idx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
pser.index.name = "ts"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
pser.index.name = "index"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.between_time("0:15", "0:45").sort_index(),
psser.between_time("0:15", "0:45").sort_index(),
)
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pser = pd.Series([1, 2, 3, 4], index=idx)
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
pser.index.name = "ts"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
pser.index.name = "index"
psser = ps.from_pandas(pser)
self.assert_eq(
pser.at_time("0:20").sort_index(),
psser.at_time("0:20").sort_index(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
agadiraju/519finalproject | svm/test_poly_svm.py | 1 | 1515 | print(__doc__)
import numpy as np
from sklearn import metrics
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from datetime import datetime
from import_train import rmsle
from import_train import import_training_file
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn import preprocessing as pre
from scipy import sparse
import sys
if __name__ == '__main__':
(X, y_total, y_regis, y_casual) = import_training_file(sys.argv[1], True)
n,d = X.shape
    nTrain = int(0.5 * n)
Xtrain = X[:nTrain,:]
y_casual_train = y_casual[:nTrain]
y_regis_train = y_regis[:nTrain]
y_total_train = y_total[:nTrain]
Xtest = X[nTrain:,:]
y_casual_test = y_casual[nTrain:]
y_regis_test = y_regis[nTrain:]
y_total_test = y_total[nTrain:]
#linear
#param_grid = {'C': [1, 5, 10, 100],}
#clf = GridSearchCV(SVC(kernel='linear'), param_grid,n_jobs=-1)
#clf = SVC(kernel='poly')
#clf.fit(Xtrain,ytrain)
#pred = clf.predict(Xtest)
#print "best estimator = ",clf.best_estimator_
#print "RMSE poly = ", rmsle(ytest, pred)
    # Fit separate polynomial-kernel SVR models for the registered and casual
    # counts, then sum the two predictions to get the total count.
clf_regis = SVR(kernel='poly')
clf_regis.fit(Xtrain,y_regis_train)
pred_regis = clf_regis.predict(Xtest)
clf_casual = SVR(kernel='poly')
clf_casual.fit(Xtrain,y_casual_train)
pred_casual = clf_casual.predict(Xtest)
pred_total = pred_casual + pred_regis
print "RMSLE poly total = ", rmsle(y_total_test, pred_total)
| mit |
badbytes/pymeg | pdf2py/headshape.py | 1 | 2025 | # Copyright 2008 Dan Collins
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# And is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Build; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#try:from scipy.io.numpyio import *
#except ImportError: from extra.numpyio import *
from numpy import char, reshape
from pdf2py import io_wrapper
fread = io_wrapper.fread
fwrite = io_wrapper.fwrite
#import matplotlib.axes3d as p3 #legacy code
# 3d plotting doesn't work in pylab v0.98
#danc changed units to mm from meters
class read:
def __init__(self, hsfile):
        self.fid = open(hsfile, "rb")
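        # hs_file is a packed binary file: four int32 header fields (version,
        # timestamp, checksum, number of points), five fiducial locations
        # (LPA, RPA, nasion, Cz, inion) stored as 3 doubles each, and finally
        # the digitized head-shape points as npoints*3 doubles. Positions are
        # converted from meters to millimeters (*1000) as they are read.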
self.hdr_version = fread(self.fid, 1, 'i', 'i', 1);
self.hdr_timestamp = fread(self.fid, 1, 'i', 'i', 1);
self.hdr_checksum = fread(self.fid, 1, 'i', 'i', 1);
self.hdr_npoints = fread(self.fid, 1, 'i', 'i', 1);
self.index_lpa = fread(self.fid, 3, 'd', 'd', 1)*1000;
self.index_rpa = fread(self.fid, 3, 'd', 'd', 1)*1000;
self.index_nasion = fread(self.fid, 3, 'd', 'd', 1)*1000;
self.index_cz = fread(self.fid, 3, 'd', 'd', 1)*1000;
self.index_inion = fread(self.fid, 3, 'd', 'd', 1)*1000;
hs_pointvec = fread(self.fid, self.hdr_npoints*3, 'd', 'd', 1)*1000;
self.hs_point = reshape(hs_pointvec, [len(hs_pointvec)/3, 3])
        self.fid.close()
if __name__ == "__main__":
hsfile = '/media/2TB/4D_data/msw_data/spartan_data0/0611/IB_MOTb/04%13%11@14:55/1/hs_file'
c = read(hsfile)
print c.hdr_npoints, c.hs_point
| gpl-3.0 |
IndraVikas/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
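    # Convolving each flattened 8x8 image with a one-hot 3x3 kernel shifts it
    # by one pixel in the direction of the non-zero entry.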
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but also a
# longer fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
ThomasMiconi/nupic.research | projects/sequence_prediction/reberGrammar/reberSequencePrediction_HMM.py | 12 | 5350 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from htmresearch.support.reberGrammar import *
from htmresearch.algorithms.hidden_markov_model import HMM
import matplotlib.pyplot as plt
import numpy as np
from copy import copy
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
plt.ion()
from sys import stdout
MAXLENGTH = 20
numTestSequence = 50
numStates = 2
numCats = len(chars)
charsToInts = dict(zip(chars, range(numCats)))
def initializeHMM(numStates=numStates, possibleObservations=numCats):
print "Initializing HMM..."
hmm = HMM(numStates=numStates, numCats=possibleObservations)
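  # Start from random parameters, then normalize so that pi sums to 1 and each
  # row of the transition (A) and emission (B) matrices sums to 1, i.e. they
  # are valid probability distributions.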
hmm.pi = np.random.rand(numStates)
hmm.pi /= sum(hmm.pi)
hmm.A = np.random.rand(numStates, numStates)
A_row_sums = hmm.A.sum(axis=1)
hmm.A /= A_row_sums[:, np.newaxis]
hmm.B = np.random.rand(numStates, numCats)
B_row_sums = hmm.B.sum(axis=1)
hmm.B /= B_row_sums[:, np.newaxis]
print "Initial HMM stats"
print "A: ",
print hmm.A
print "B: ",
print hmm.B
print "pi: ",
print hmm.pi
return hmm
def learnHMM(numTrainSequence, numStates=numStates):
hmm = initializeHMM(numStates)
for i in range(numTrainSequence):
sample, _ = generateSequences(MAXLENGTH)
sampleInts = np.array([charsToInts[c] for c in sample])
hmm.train(sampleInts)
print "HMM stats"
print "A: ",
print hmm.A
print "B: ",
print hmm.B
print "pi: ",
print hmm.pi
return hmm
def testHMM(hmm, numTestSequence=numTestSequence):
outcomeAll = []
numOutcome = 0.0
numPred = 0.0
numMiss = 0.0
numFP = 0.0
numStep = 0.0
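  # For every step of each test sequence, compare the HMM's predicted next
  # symbols against the set of next symbols allowed by the Reber grammar.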
for _ in range(numTestSequence):
sample, target = generateSequences(MAXLENGTH)
hmm.reset()
for i in range(len(sample)):
current_input = charsToInts[sample[i]]
possible_next_inputs = set(np.array([charsToInts[c] for c in target[i]]))
predicted_next_inputs = hmm.predict_next_inputs(current_input)
# fraction of predicted inputs that were in possible outputs
numPreds = 1.0*len(predicted_next_inputs)
if numPreds > 0:
outcome = len(predicted_next_inputs & possible_next_inputs) / numPreds
else:
outcome = 0
outcomeAll.append(outcome)
# missN is number of possible outcomes not predicted
missN = len(possible_next_inputs - predicted_next_inputs)
#fpN is number of predicted outcomes not possible
fpN = len(predicted_next_inputs - possible_next_inputs)
numPred += numPreds
numOutcome += len(target)
numMiss += missN
numFP += fpN
numStep += 1
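  # The hard-coded 7 is the number of symbols in the Reber grammar alphabet
  # (len(chars)), so these are per-step, per-symbol rates.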
correctRate = sum(outcomeAll)/float(len(outcomeAll))
missRate = float(numMiss)/float(numStep * 7)
fpRate = float(numFP)/float(numStep * 7)
errRate = float(numMiss + numFP)/float(numStep * 7)
print("Correct Rate (Best Prediction): ", correctRate)
print("Error Rate: ", errRate)
print("Miss Rate: ", missRate)
print("False Positive Rate: ", fpRate)
return correctRate, missRate, fpRate
def runSingleExperiment(numTrainSequence, numTestSequence=numTestSequence):
hmm = learnHMM(numTrainSequence)
return testHMM(hmm, numTestSequence)
def runExperiment():
"""
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
"""
trainSeqN = [5, 10, 20, 50, 100, 200]
rptPerCondition = 20
correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
for i in xrange(len(trainSeqN)):
for rpt in xrange(rptPerCondition):
numTrainSequence = trainSeqN[i]
correctRate, missRate, fpRate = runSingleExperiment(numTrainSequence=numTrainSequence)
correctRateAll[i, rpt] = correctRate
missRateAll[i, rpt] = missRate
fpRateAll[i, rpt] = fpRate
plt.figure()
plt.subplot(2,2,1)
plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate - Best Match (%)')
plt.subplot(2,2,2)
plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_HMMperformance.pdf')
plt.show()
if __name__ == "__main__":
runExperiment()
| agpl-3.0 |
mhdella/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
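# --- Hedged addendum (not part of the original example) ---
# A rough manual check of the soft-voting rule described in the docstring:
# the ensemble's probabilities should match the weighted average of the
# individual classifiers' probabilities with weights [1, 1, 5].
manual_average = np.average(probas[:3], axis=0, weights=[1, 1, 5])
print(manual_average)  # weighted average of the three base classifiers
print(probas[3])       # probabilities reported by the VotingClassifier itself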
| bsd-3-clause |
winklerand/pandas | pandas/core/indexes/api.py | 2 | 4715 | from pandas.core.indexes.base import (Index,
_new_Index,
_ensure_index,
_ensure_index_from_sequences,
_get_na_value,
InvalidIndexError) # noqa
from pandas.core.indexes.category import CategoricalIndex # noqa
from pandas.core.indexes.multi import MultiIndex # noqa
from pandas.core.indexes.interval import IntervalIndex # noqa
from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa
Int64Index, UInt64Index)
from pandas.core.indexes.range import RangeIndex # noqa
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
import pandas.core.common as com
from pandas._libs import lib
from pandas._libs.tslib import NaT
# TODO: there are many places that rely on these private methods existing in
# pandas.core.index
__all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index',
'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index',
'InvalidIndexError', 'TimedeltaIndex',
'PeriodIndex', 'DatetimeIndex',
'_new_Index', 'NaT',
'_ensure_index', '_ensure_index_from_sequences', '_get_na_value',
'_get_combined_index',
'_get_objs_combined_axis', '_union_indexes',
'_get_consensus_names',
'_all_indexes_same']
def _get_objs_combined_axis(objs, intersect=False, axis=0):
# Extract combined index: return intersection or union (depending on the
# value of "intersect") of indexes on given axis, or None if all objects
# lack indexes (e.g. they are numpy arrays)
obs_idxes = [obj._get_axis(axis) for obj in objs
if hasattr(obj, '_get_axis')]
if obs_idxes:
return _get_combined_index(obs_idxes, intersect=intersect)
def _get_combined_index(indexes, intersect=False):
# TODO: handle index names!
indexes = com._get_distinct_objs(indexes)
if len(indexes) == 0:
return Index([])
if len(indexes) == 1:
return indexes[0]
if intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
return index
union = _union_indexes(indexes)
return _ensure_index(union)
def _union_indexes(indexes):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([conv(i) for i in inds]))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _unique_indices(indexes)
name = _get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else:
return _unique_indices(indexes)
def _sanitize_and_check(indexes):
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com._try_sort(x))
if not isinstance(x, Index) else
x for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set(tuple(i.names) for i in indexes
if com._any_not_none(*i.names))
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
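# --- Hedged usage sketch (added for illustration; not part of the module) ---
# Rough demonstration of the combination helpers above on two overlapping
# integer indexes; the example values are made up.
if __name__ == '__main__':
    _a = Index([1, 2, 3])
    _b = Index([2, 3, 4])
    print(_get_combined_index([_a, _b]))                  # union of both indexes
    print(_get_combined_index([_a, _b], intersect=True))  # their intersection
    print(_all_indexes_same([_a, _a.copy()]))             # True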
| bsd-3-clause |
nuclear-wizard/moose | python/postprocessing/tests/test_combine_csv.py | 4 | 5272 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import pandas
try:
from postprocessing import combine_csv
except ModuleNotFoundError:
pass
class TestCombineCSV(unittest.TestCase):
"""
Test use of combine_csv.py for combining csv files.
"""
def setUp(self):
"""
Define the pattern for test files.
"""
self.__goldpath = os.path.abspath('../../test_files/gold')
self.__basename = os.path.abspath('../../test_files/test_combine_in_')
def tearDown(self):
"""
        Remove generated CSV files.
"""
if os.path.exists("remove_me_54.csv"):
os.remove("remove_me_54.csv")
def testBasic(self):
"""
Test basic usage with minimal options and headers written.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for basic usage.")
def testBasicTime(self):
"""
Test basic usage with headers and a time file.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, timefile=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic_time.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for time usage.")
def testBasicX(self):
"""
        Test basic usage with headers and an "x" variable name.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, x_varname='y')
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_basic_x.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for x variable usage.")
def testBilinear(self):
"""
Test bilinear usage.
"""
df_test = combine_csv.CombineCSV(self.__basename, "remove_me_54.csv",
"large_number", write_header=True, x_varname='y',
timefile=True, bilinear=True)
self.assertTrue(df_test._ended)
gold_df = pandas.read_csv("{}/combine_bilinear.csv".format(
self.__goldpath))
self.assertTrue(df_test._final_df.equals(gold_df),
msg="Pandas dataframe is different from gold CSV for bilinear usage.")
def testBasenameError(self):
"""
        Test exception when a bad basename is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV('bad_basename_54',
"remove_me_54.csv", "large_number")
self.assertEqual(cerr.exception._name, "BasenameError")
def testStepBoundsError(self):
"""
        Test exception when a mismatch of steps is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "large_number", lastn=2, endt=1)
self.assertEqual(cerr.exception._name, "StepBoundsError")
def testXVariableError(self):
"""
        Test exception when a bad "x" variable name is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "large_number",
x_varname='bad_x54_name')
self.assertEqual(cerr.exception._name, "XVariableError")
def testInconsistentError(self):
"""
Test exception when data rows are not consistent.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV("{}54_bad_".format(
self.__basename), "remove_me_54.csv", "large_number",
x_varname='x')
self.assertEqual(cerr.exception._name, "InconsistentError")
def testYVariableError(self):
"""
        Test exception when a bad "y" variable name is provided.
"""
with self.assertRaises(combine_csv.CombineCSV.CombineError) as cerr:
df_test = combine_csv.CombineCSV(self.__basename,
"remove_me_54.csv", "bad_y54_name")
self.assertEqual(cerr.exception._name, "YVariableError")
if __name__ == '__main__':
import sys
sys.path.append("..")
import combine_csv
unittest.main(module=__name__, verbosity=2)
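# --- Hedged sketch (added; not part of the test suite) ---
# Direct use of the combiner outside unittest, assuming the constructor
# signature exercised by the tests above; the file names are hypothetical:
#
#     combined = combine_csv.CombineCSV('my_run_out_', 'combined.csv',
#                                       'large_number', write_header=True,
#                                       timefile=True, x_varname='y',
#                                       bilinear=True)
#     print(combined._final_df.head())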
| lgpl-2.1 |
noelevans/sandpit | time_series_prediction_1.py | 1 | 2470 | import datetime
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
DATA = [1807, 1957, 1574, 1531, 1026, 973, 1207, 1127, 1171, 811, 21534, 28001,
40021, 27690, 20560, 17119, 16904, 22417, 9585, 25978, 26957, 14802,
10794, 13838, 22581, 14669, 3684, 10126, 13599, 27646, 17838, 17028,
14616, 14433, 27013, 11949, 42065, 9907, 9951, 22854, 12362, 10369,
10833, 13040, 21326, 15637, 6846, 7070, 10412, 21954, 12614, 24361,
13038, 12850, 30691, 21348, 11775, 12354, 12098, 24439, 14209, 9804,
9589, 10614, 21312, 11933, 10310, 10138, 19546, 24428, 22483, 10746,
13125, 13556, 25044, 9880, 16182, 13138, 25781, 25709, 14522, 8779,
9969, 9779, 21988, 13763, 9075, 9544, 11393, 21210, 10454, 4307, 4456,
8944, 18892, 9262, 13495, 8258, 7197, 21006, 18046, 4002, 11867, 8192,
21920, 9979, 4031, 4840, 4820, 16573, 6917, 4084, 5296, 4821, 19136,
15487, 6127, 9275, 12540, 20698, 9229, 2389, 6735, 6563, 19895]
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
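# --- Added explanatory note (not in the original script) ---
# rolling_window builds overlapping windows as a zero-copy strided view, e.g.:
#   rolling_window(np.arange(5), 3)
#   -> array([[0, 1, 2],
#             [1, 2, 3],
#             [2, 3, 4]])        # shape (5 - 3 + 1, 3)
# main() below uses it to take percentiles over a sliding 20-sample window.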
def main():
start = datetime.date(2015, 9, 12)
data = [{'date': start + datetime.timedelta(days=n), 'vol': val}
for n, val in enumerate(DATA)]
df = pd.DataFrame(data, columns=['date', 'vol'])
mean = np.mean(df.vol)
std = np.std(df.vol)
lower_pc = np.percentile(df.vol, 2.5)
upper_pc = np.percentile(df.vol, 97.5)
f, (ax1, ax2) = plt.subplots(2)
df.plot(ax=ax1)
window_len = 20
window = rolling_window(df.vol.values, window_len)
rolling_lower_pc = np.percentile(window, 5, axis=1)
rolling_upper_pc = np.percentile(window, 95, axis=1)
X = np.arange(len(rolling_lower_pc)) + window_len
ax1.fill_between(X, rolling_lower_pc, rolling_upper_pc,
alpha=0.2, label='5-95th percentile window')
ax2.hist(df.vol, 20, alpha=0.7)
ax2.axvspan(mean - 2 * std,
mean + 2 * std,
alpha=0.2,
label='SD',
zorder=-1)
ax2.axvspan(lower_pc,
upper_pc,
alpha=0.4,
color='grey',
hatch='/',
label='Percentiles',
zorder=-1)
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| mit |
gotomypc/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Private function used by the _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
    """Private function used by the forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
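# --- Added explanatory note (not in the original source) ---
# For a fixed random_state and n_samples, the two helpers above partition the
# training rows: _generate_sample_indices draws the bootstrap sample (with
# replacement, so some rows repeat) and _generate_unsampled_indices returns
# exactly the rows that were never drawn, i.e. the out-of-bag rows used for
# the OOB score.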
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
    """Private helper to work around Python 2 pickle limitations."""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample', 'auto')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
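# --- Hedged usage sketch (added for illustration; not part of the library) ---
# A minimal end-to-end use of the classifier documented above, guarded so it
# only runs when the module is executed directly (e.g. with ``python -m``).
# The toy data is made up; exact outputs depend on the seed and tree count.
if __name__ == '__main__':
    _X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]] * 10)
    _y = np.array([0, 1, 1, 0] * 10)
    _clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                  random_state=0)
    _clf.fit(_X, _y)
    print(_clf.predict([[0.9, 0.9]]))   # expected to be class 1 here
    print(_clf.feature_importances_)    # importances sum (approximately) to 1
    print(_clf.oob_score_)              # out-of-bag accuracy estimate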
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
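# --- Hedged usage sketch (added for illustration; not part of the library) ---
# The regressor mirrors the classifier API: fit on (X, y) and predict real
# values.  The toy data is made up; the printed value is only approximate.
if __name__ == '__main__':
    _Xr = np.arange(20, dtype=np.float64).reshape(-1, 1)
    _yr = 2.0 * _Xr.ravel()
    _reg = RandomForestRegressor(n_estimators=25, random_state=0).fit(_Xr, _yr)
    print(_reg.predict([[10.5]]))   # roughly 21, up to tree discretisation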
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
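    Examples
    --------
    A minimal sketch; the number of output columns depends on the leaves
    actually reached by the fitted trees, so only the number of rows is
    shown:
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> embedder = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_sparse = embedder.fit_transform(X)
    >>> X_sparse.shape[0]
    4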
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/indexes/period.py | 1 | 44774 | # pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.core.dtypes.common import (
is_integer,
is_float,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64_any_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_object)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.offsets import Tick, DateOffset
from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index
from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin
from pandas.core.tools.datetimes import parse_time_string
from pandas._libs.lib import infer_dtype
from pandas._libs import tslib, index as libindex
from pandas._libs.tslibs.period import (Period, IncompatibleFrequency,
get_period_field_arr,
_validate_end_alias, _quarter_to_myear)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs import resolution, period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util._decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.compat import zip, u
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
result = get_period_field_arr(alias, self._ndarray_values, base)
return Index(result, name=self.name)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, cls):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
nat_result = True if opname == '__ne__' else False
def wrapper(self, other):
op = getattr(self._ndarray_values, opname)
if isinstance(other, Period):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other._ndarray_values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._ndarray_values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
if d['data'].dtype == 'int64':
values = d.pop('data')
return cls._from_ordinals(values=values, **d)
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
tz_convert
tz_localize
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
See Also
---------
Index : The base pandas Index type
Period : Represents a period of time
DatetimeIndex : Index with datetime64 data
TimedeltaIndex : Index of timedelta64 data
"""
_typ = 'periodindex'
_attributes = ['name', 'freq']
# define my properties & methods for delegation
_other_ops = []
_bool_ops = ['is_leap_year']
_object_ops = ['start_time', 'end_time', 'freq']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'weekday', 'week', 'dayofweek',
'dayofyear', 'quarter', 'qyear',
'days_in_month', 'daysinmonth']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ['strftime', 'to_timestamp', 'asfreq']
_is_numeric_dtype = False
_infer_as_myclass = True
_freq = None
_engine_type = libindex.PeriodEngine
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
cls.__eq__ = _period_index_cmp('__eq__', cls)
cls.__ne__ = _period_index_cmp('__ne__', cls)
cls.__lt__ = _period_index_cmp('__lt__', cls)
cls.__gt__ = _period_index_cmp('__gt__', cls)
cls.__le__ = _period_index_cmp('__le__', cls)
cls.__ge__ = _period_index_cmp('__ge__', cls)
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, tz=None, dtype=None, copy=False, name=None,
**fields):
valid_field_set = {'year', 'month', 'day', 'quarter',
'hour', 'minute', 'second'}
if not set(fields).issubset(valid_field_set):
raise TypeError('__new__() got an unexpected keyword argument {}'.
format(list(set(fields) - valid_field_set)[0]))
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
# coerce freq to freq object, otherwise it can be coerced elementwise
# which is slow
if freq:
freq = Period._maybe_convert_freq(freq)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, fields)
return cls._from_ordinals(data, name=name, freq=freq)
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq: # no freq change
freq = data.freq
data = data._ndarray_values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._ndarray_values,
base1, base2, 1)
return cls._simple_new(data, name=name, freq=freq)
# not array / index
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data)
# datetime other than period
if is_datetime64_dtype(data.dtype):
data = dt64arr_to_periodarr(data, freq, tz)
return cls._from_ordinals(data, name=name, freq=freq)
# check not floats
if infer_dtype(data) == 'floating' and len(data) > 0:
raise TypeError("PeriodIndex does not allow "
"floating point in construction")
# anything else, likely an array of strings or periods
data = _ensure_object(data)
freq = freq or period.extract_freq(data)
data = period.extract_ordinals(data, freq)
return cls._from_ordinals(data, name=name, freq=freq)
@cache_readonly
def _engine(self):
return self._engine_type(lambda: self, len(self))
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
"""
Values can be any type that can be coerced to Periods.
Ordinals in an ndarray are fastpath-ed to `_from_ordinals`
"""
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if len(values) > 0 and is_float_dtype(values):
raise TypeError("PeriodIndex can't take floats")
return cls(values, name=name, freq=freq, **kwargs)
return cls._from_ordinals(values, name, freq, **kwargs)
@classmethod
def _from_ordinals(cls, values, name=None, freq=None, **kwargs):
"""
Values should be int ordinals
        `__new__` & `_simple_new` coerce to ordinals and call this method
"""
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified and cannot be inferred')
result._freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, freq=None, **kwargs):
if freq is None:
freq = self.freq
if values is None:
values = self._ndarray_values
return super(PeriodIndex, self)._shallow_copy(values=values,
freq=freq, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
@Appender(_index_shared_docs['__contains__'])
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
contains = __contains__
@property
def asi8(self):
return self._ndarray_values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.astype(object).values
@property
def _ndarray_values(self):
# Ordinals
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.astype(object).values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
        Replace this with __numpy_ufunc__ in a future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return self._shallow_copy(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False, dtype=None):
"""
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.astype(object).values
@property
def size(self):
# Avoid materializing self._values
return self._ndarray_values.size
@property
def shape(self):
# Avoid materializing self._values
return self._ndarray_values.shape
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._ndarray_values[mask].searchsorted(
where_idx._ndarray_values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._ndarray_values <
self._ndarray_values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_integer_dtype(dtype):
return self._int64index.copy() if copy else self._int64index
elif is_datetime64_any_dtype(dtype):
tz = getattr(dtype, 'tz', None)
return self.to_timestamp(how=how).tz_localize(tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
return super(PeriodIndex, self).astype(dtype, copy=copy)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise IncompatibleFrequency(msg)
value = value.ordinal
elif isinstance(value, compat.string_types):
value = Period(value, freq=self.freq).ordinal
return self._ndarray_values.searchsorted(value, side=side,
sorter=sorter)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
@property
def freq(self):
"""Return the frequency object if it is set, otherwise None"""
return self._freq
@freq.setter
def freq(self, value):
msg = ('Setting PeriodIndex.freq has been deprecated and will be '
'removed in a future version; use PeriodIndex.asfreq instead. '
'The PeriodIndex.freq setter is not guaranteed to work.')
warnings.warn(msg, FutureWarning, stacklevel=2)
self._freq = value
def asfreq(self, freq=None, how='E'):
"""
Convert the PeriodIndex to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
            or start within a period. January 31st ('END') vs.
            January 1st ('START') for example.
Returns
-------
new : PeriodIndex with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = _validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period.period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = tslib.iNaT
return self._simple_new(new_data, self.name, freq=freq)
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object).values
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
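        Examples
        --------
        A small sketch; with the default ``how='start'`` each period is
        anchored at its first timestamp:
        >>> pidx = pd.period_range('2017-01', periods=2, freq='M')
        >>> pidx.to_timestamp()
        DatetimeIndex(['2017-01-01', '2017-02-01'], dtype='datetime64[ns]', freq='MS')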
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data._ndarray_values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
def _maybe_convert_timedelta(self, other):
if isinstance(
other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
if isinstance(other, np.ndarray):
nanos = np.vectorize(delta_to_nanoseconds)(other)
else:
nanos = delta_to_nanoseconds(other)
offset_nanos = delta_to_nanoseconds(offset)
check = np.all(nanos % offset_nanos == 0)
if check:
return nanos // offset_nanos
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from PeriodIndex(freq={0})"
raise IncompatibleFrequency(msg.format(self.freqstr))
def _add_offset(self, other):
assert not isinstance(other, Tick)
base = frequencies.get_base_alias(other.rule_code)
if base != self.freq.rule_code:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
return self.shift(other.n)
def _add_delta_td(self, other):
assert isinstance(other, (timedelta, np.timedelta64, Tick))
nanos = delta_to_nanoseconds(other)
own_offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(own_offset, Tick):
offset_nanos = delta_to_nanoseconds(own_offset)
if np.all(nanos % offset_nanos == 0):
return self.shift(nanos // offset_nanos)
# raise when input doesn't have freq
raise IncompatibleFrequency("Input has different freq from "
"{cls}(freq={freqstr})"
.format(cls=type(self).__name__,
freqstr=self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self.shift(ordinal_delta)
def _sub_datelike(self, other):
assert other is not tslib.NaT
return NotImplemented
def _sub_period(self, other):
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
if self.hasnans:
new_data = new_data.astype(np.float64)
new_data[self._isnan] = np.nan
# result must be Int64Index or Float64Index
return Index(new_data)
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
Parameters
----------
n : int
Periods to shift by
Returns
-------
shifted : PeriodIndex
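        Examples
        --------
        A small sketch; each element moves by ``n`` periods of the index's
        own frequency:
        >>> pidx = pd.period_range('2017-01', periods=3, freq='M')
        >>> pidx.shift(2)
        PeriodIndex(['2017-03', '2017-04', '2017-05'], dtype='period[M]', freq='M')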
"""
values = self._ndarray_values + n * self.freq.n
if self.hasnans:
values[self._isnan] = tslib.iNaT
return self._shallow_copy(values=values)
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = com._values_from_object(series)
try:
return com._maybe_box(self,
super(PeriodIndex, self).get_value(s, key),
series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
vals = self._ndarray_values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self._ndarray_values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return com._maybe_box(self, self._engine.get_value(s, key),
series, key)
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = _ensure_index(target)
if hasattr(target, 'freq') and target.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)
raise IncompatibleFrequency(msg)
if isinstance(target, PeriodIndex):
target = target.asi8
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
return Index.get_indexer(self._int64index, target, method,
limit, tolerance)
def _get_unique_index(self, dropna=False):
"""
wrap Index._get_unique_index to handle NaT
"""
res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)
if dropna:
res = res.dropna()
return res
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
try:
key = Period(key, freq=self.freq)
except ValueError:
# we cannot construct the Period
# as we have an invalid type
raise KeyError(key)
try:
ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance,
np.asarray(key))
return self._int64index.get_loc(ordinal, method, tolerance)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem']
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second, freq='S')
else:
raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def _convert_tolerance(self, tolerance, target):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance,
target)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return self._maybe_convert_timedelta(tolerance)
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.astype(object).insert(loc, item)
idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),
self[loc:].asi8))
return self._shallow_copy(idx)
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
super(PeriodIndex, self)._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex._from_ordinals(rawarr, freq=self.freq,
name=self.name)
return rawarr
def _format_native_types(self, na_rep=u('NaT'), date_format=None,
**kwargs):
values = self.astype(object).values
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: u('%s') % dt
if self.hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt
in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backcompat
self._freq = Period._maybe_convert_freq(own_state[1])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Notes
-----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, ambiguous='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
Returns
-------
localized : DatetimeIndex
Notes
-----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_comparison_methods()
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
if freq is not None:
_, mult = _gfc(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('start and end must have same freq')
if (start is tslib.NaT or end is tslib.NaT):
raise ValueError('start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(
y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
def pnow(freq=None):
# deprecation, xref #13790
warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() "
"are deprecated. Please use Period.now()",
FutureWarning, stacklevel=2)
return Period.now(freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or period-like, default None
Left bound for generating periods
end : string or period-like, default None
Right bound for generating periods
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency alias
name : string, default None
Name of the resulting PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
prng : PeriodIndex
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
    PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
                 '2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
                 '2017-11', '2017-12', '2018-01'],
                dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]', freq='M')
"""
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_gtkcairo.py | 8 | 2374 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print('backend_gtkcairo.%s()' % fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKCairo(figure)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.gc.ctx = pixmap.cairo_create()
else:
def set_pixmap (self, pixmap):
self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2GTKCairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
FigureCanvas = FigureCanvasGTKCairo
FigureManager = FigureManagerGTKCairo
| apache-2.0 |
GRAAL-Research/Transductive-PAC-Bayes | experiment_bound_figures.py | 1 | 5518 | """
Empirical experiments of 'PAC-Bayesian Theory for Transductive Learning' (Begin et al., 2014)
http://graal.ift.ulaval.ca/aistats2014/
This file allows to reproduce Figures 1, 2 and 3 (the two latter are in Supplementary Material).
Please uncomment the lines in the 'main' function and play with parameters at your will!
Code authors: Pascal Germain, Jean-Francis Roy
Released under the Simplified BSD license
"""
def main():
# Generate Figure 1 of Begin et al. (2014), without N=5000 for quicker calculations
generate_bound_figures(N_list=[200,500], ratios_list=[0.1, 0.5, 0.9], risk=0.2, KLQP=5.0)
# Generate the whole Figure 1 of Begin et al. (2014)
#generate_bound_figures(N_list=[200,500,5000], ratios_list=[0.1, 0.5, 0.9], risk=0.2, KLQP=5.0)
# Generate the whole Figure 2 of Begin et al. (2014, Supplementary Material)
#generate_bound_figures(N_list=[200,500,5000], ratios_list=[0.1, 0.5, 0.9], risk=0.1, KLQP=5.0)
# Generate the whole Figure 3 of Begin et al. (2014, Supplementary Material)
#generate_bound_figures(N_list=[200,500,5000], ratios_list=[0.1, 0.5, 0.9], risk=0.01, KLQP=5.0)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import numpy as np
from collections import OrderedDict
from math import log
from matplotlib import pyplot
from transductive_bounds import compute_general_transductive_gibbs_bound, compute_transductive_complexity_term
import d_functions
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def generate_bound_figures(N_list, ratios_list, risk, KLQP, delta=0.05):
""" Illustrate of the bound calculations
N_list : List of full sample size
ratios_list : List of m/N rations ( where m is the number of labeled examples)
risk : Gibbs risk observed on the training set
KLQP : Kullback-Leibler divergence between prior and posterior
delta : confidence parameter (default=0.05)
"""
divergences_dict = OrderedDict()
divergences_dict["Kullback-Leibler"] = d_functions.kl_divergence
divergences_dict["D*-function"] = d_functions.new_transductive_divergence
divergences_dict["Quadratic Distance"] = d_functions.quadratic_distance
divergences_dict["Variation Distance"] = d_functions.variation_distance
divergences_dict["Triangular Discrimination"] = d_functions.triangular_discrimination
n_rows = len(ratios_list)
n_cols = len(divergences_dict)
x_values = np.arange(0., 1.0, .005)
pyplot.subplots_adjust(wspace=0.1, hspace=0.1)
STATS_dict = dict()
for i, divergence_name in enumerate(divergences_dict.keys()):
print('*** D-function: ' + divergence_name + ' ***')
for j, ratio in enumerate(ratios_list):
ax = pyplot.subplot(n_rows, n_cols, j*n_cols + i + 1)
# Compute and draw d-function values (blue curves)
divergence = divergences_dict[divergence_name]
divergence_values = [divergence(risk, x, ratio) for x in x_values]
pyplot.plot(x_values, divergence_values, linewidth=2)
# Compute and draw bound values (horizontal lines) for each value of N
for N in N_list:
m = N * ratio
complexity_term = compute_transductive_complexity_term(divergence, m, N)
bound = compute_general_transductive_gibbs_bound(divergence, risk, m, N, KLQP, delta=delta, complexity_term=complexity_term)
rhs = (KLQP + log(complexity_term / delta)) / m
print('m=%d N=%d bound=%0.3f' % (m,N,bound) )
handle = pyplot.plot([-1., bound, 2.], 3*[rhs], 'o--', label='%0.3f' % bound)[0]
STATS_dict[(i, N, ratio)] = (bound, rhs, handle)
# Compute and draw risk limits (vertical dashed lines)
risk_sup = 1. - ratio * (1.-risk)
risk_inf = ratio * risk
pyplot.plot(2*[risk_sup], [0., 1.], 'k:')
pyplot.plot(2*[risk_inf], [0., 1.], 'k:')
# Set plot properties
pyplot.legend(loc=2)
pyplot.xlim(0., 1.)
pyplot.ylim(0., .5 if ratio > .4 else 1.)
if j == n_rows-1:
pyplot.xlabel(divergence_name)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(13)
else:
pyplot.setp(ax.get_xticklabels(), visible=False)
if i == 0:
pyplot.ylabel("m/N = %0.1f" % ratio)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(13)
else:
pyplot.setp(ax.get_yticklabels(), visible=False)
# Highlight lower bounds for each (m,N) pairs
for N in N_list:
for j, ratio in enumerate(ratios_list):
best_bound = 1e6
best_i = -1
for i, _ in enumerate(divergences_dict.keys()):
bound, rhs, handle = STATS_dict[(i, N, ratio)]
if bound < best_bound:
best_bound, best_handle, best_i = bound, handle, i
best_handle.set_marker('*')
best_handle.set_markersize(14.)
best_handle.set_markeredgewidth(0.)
pyplot.subplot(n_rows, n_cols, j * n_cols + best_i + 1)
pyplot.legend(loc=2)
pyplot.show()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
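# Minimal usage sketch of a single bound computation, kept separate from the
# figure code. The function signatures follow the calls above; the numeric
# values below are arbitrary illustrations.
def compute_single_bound_example():
    divergence = d_functions.kl_divergence
    m, N = 100, 500                 # number of labeled examples / full sample size
    risk, KLQP, delta = 0.2, 5.0, 0.05
    complexity_term = compute_transductive_complexity_term(divergence, m, N)
    bound = compute_general_transductive_gibbs_bound(
        divergence, risk, m, N, KLQP, delta=delta,
        complexity_term=complexity_term)
    print('Transductive bound on the Gibbs risk: %0.3f' % bound)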
if __name__ == '__main__':
print( __doc__ )
main()
| bsd-2-clause |
ky822/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
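    Examples
    --------
    A minimal sketch (the exact estimates depend on the random initial
    support; only the shapes of the leading outputs are asserted here):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(100, 2)
    >>> results = c_step(X, n_support=60, random_state=0)
    >>> location, covariance = results[0], results[1]
    >>> location.shape, covariance.shape
    ((2,), (2, 2))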
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
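    Examples
    --------
    A minimal sketch (shapes only; the selected supports depend on the
    random initial subsets):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(80, 2)
    >>> locs, covs, supports, dists = select_candidates(
    ...     X, n_support=50, n_trials=4, select=2, random_state=0)
    >>> locs.shape, covs.shape, supports.shape
    ((2, 2), (2, 2, 2), (2, 80))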
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
            # find the shortest halves of the sample
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the number of candidates we keep
            # before allocating again, otherwise this retry would also run
            # out of memory.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
            n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
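# Illustrative sketch (the toy data and seed are assumptions of the example):
# calling `fast_mcd` directly on a small Gaussian sample and unpacking the raw
# location, covariance, support mask and squared Mahalanobis distances.
def _example_fast_mcd():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 3)
    location, covariance, support, dist = fast_mcd(X_demo, random_state=0)
    return location, covariance, support, dist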
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is approximately (but not
        exactly) zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
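# Illustrative sketch (toy data, not taken from the module above): fitting
# MinCovDet and reading back the robust attributes documented in the class
# docstring.
def _example_min_cov_det():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(200, 2)
    mcd = MinCovDet(random_state=0).fit(X_demo)
    return mcd.location_, mcd.covariance_, mcd.support_, mcd.dist_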
| bsd-3-clause |
BigTone2009/sms-tools | lectures/03-Fourier-properties/plots-code/zero-padding.py | 26 | 1083 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 6))
M = 8
N1 = 8
N2 = 16
N3 = 32
x = np.cos(2*np.pi*2/M*np.arange(M)) * np.hanning(M)
plt.subplot(4,1,1)
plt.title('x, M=8')
plt.plot(np.arange(-M/2.0,M/2), x, 'b', marker='x', lw=1.5)
plt.axis([-M/2,M/2-1,-1,1])
mX = 20 * np.log10(np.abs(fftshift(fft(x, N1))))
plt.subplot(4,1,2)
plt.plot(np.arange(-N1/2.0,N1/2), mX, marker='x', color='r', lw=1.5)
plt.axis([-N1/2,N1/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX1, N=8')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N2))))
plt.subplot(4,1,3)
plt.plot(np.arange(-N2/2.0,N2/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N2/2,N2/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX2, N=16')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N3))))
plt.subplot(4,1,4)
plt.plot(np.arange(-N3/2.0,N3/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N3/2,N3/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX3, N=32')
plt.tight_layout()
plt.savefig('zero-padding.png')
plt.show()
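# Illustrative add-on (`fs` is a hypothetical sampling rate, not used above):
# zero-padding from N=8 to N=32 only makes the FFT bin spacing fs/N finer,
# i.e. it interpolates the spectrum; the achievable resolution is still set
# by the M=8 windowed input samples.
def bin_spacing(fs, N):
    return float(fs) / N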
| agpl-3.0 |
ClimbsRocks/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 23 | 15933 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
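# Illustrative sketch (not one of the original tests): a valid use of
# make_scorer wraps a metric plus fixed keyword arguments into a scorer that
# model-selection tools such as GridSearchCV can consume.
def _example_valid_make_scorer():
    ftwo_scorer = make_scorer(fbeta_score, beta=2)
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
                        scoring=ftwo_scorer)
    return grid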
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that weighted and unweighted scores on the classifier
    # output really are unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
| bsd-3-clause |
GillesPy/gillespy | examples/Volume_test.py | 1 | 2476 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 16:50:12 2015
@author: john
"""
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import gillespy
class Simple1(gillespy.Model):
"""
This is a simple example for mass-action degradation of species S.
"""
def __init__(self, parameter_values=None,volume=1.0):
# Initialize the model.
gillespy.Model.__init__(self, name="simple1", volume=volume)
# Parameters
k1 = gillespy.Parameter(name='k1', expression=0.03)
self.add_parameter(k1)
# Species
r1 = gillespy.Species(name='r1', initial_value=100)
self.add_species(r1)
r2 = gillespy.Species(name='r2', initial_value=100)
self.add_species(r2)
# Reactions
rxn1 = gillespy.Reaction(
name = 'r1d',
reactants = {r1:2},
products = {},
rate = k1 )
self.add_reaction(rxn1)
rxn2 = gillespy.Reaction(
name = 'r2d',
reactants = {r2:2},
products = {},
propensity_function = 'k1/2 * r2*(r2-1)/vol' )
self.add_reaction(rxn2)
if __name__ == '__main__':
# Here, we create the model object.
# We could pass new parameter values to this model here if we wished.
simple_1 = Simple1(volume=10)
import time
tick = time.time()
    # The model object is simulated with the StochKit solver, and
    # `num_trajectories` trajectories are returned.
num_trajectories = 1
simple_1trajectories = gillespy.StochKitSolver.run(simple_1,
number_of_trajectories = num_trajectories, show_labels=False)
print time.time() - tick
# PLOTTING
# here, we will plot all trajectories with the mean overlaid
from matplotlib import gridspec
gs = gridspec.GridSpec(1,1)
plt.figure()
ax0 = plt.subplot(gs[0,0])
# extract time values
time = np.array(simple_1trajectories[0][:,0])
    # plot the mass-action and custom-propensity trajectories for comparison
ax0.plot(time,simple_1trajectories[0][:,1],'k--',label='ma')
ax0.plot(time,simple_1trajectories[0][:,2],'g+',label='custom')
ax0.legend()
ax0.set_xlabel('Time')
ax0.set_ylabel('Species r Count')
plt.tight_layout()
plt.show()
| gpl-3.0 |
trankmichael/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
kmather73/zipline | zipline/data/ffc/loaders/us_equity_pricing.py | 16 | 21283 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from contextlib import contextmanager
from errno import ENOENT
from os import remove
from os.path import exists
from bcolz import (
carray,
ctable,
)
from click import progressbar
from numpy import (
array,
array_equal,
float64,
floating,
full,
iinfo,
integer,
issubdtype,
uint32,
)
from pandas import (
DatetimeIndex,
read_csv,
Timestamp,
)
from six import (
iteritems,
string_types,
with_metaclass,
)
import sqlite3
from zipline.data.ffc.base import FFCLoader
from zipline.data.ffc.loaders._us_equity_pricing import (
_compute_row_slices,
_read_bcolz_data,
load_adjustments_from_sqlite,
)
from zipline.lib.adjusted_array import (
adjusted_array,
)
from zipline.errors import NoFurtherDataError
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = [
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
]
DAILY_US_EQUITY_PRICING_DEFAULT_FILENAME = 'daily_us_equity_pricing.bcolz'
SQLITE_ADJUSTMENT_COLUMNS = frozenset(['effective_date', 'ratio', 'sid'])
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': floating,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
UINT32_MAX = iinfo(uint32).max
@contextmanager
def passthrough(obj):
yield obj
class BcolzDailyBarWriter(with_metaclass(ABCMeta)):
"""
Class capable of writing daily OHLCV data to disk in a format that can be
read efficiently by BcolzDailyOHLCVReader.
See Also
--------
BcolzDailyBarReader : Consumer of the data written by this class.
"""
@abstractmethod
def gen_tables(self, assets):
"""
Return an iterator of pairs of (asset_id, bcolz.ctable).
"""
raise NotImplementedError()
@abstractmethod
def to_uint32(self, array, colname):
"""
Convert raw column values produced by gen_tables into uint32 values.
Parameters
----------
array : np.array
An array of raw values.
colname : str, {'open', 'high', 'low', 'close', 'volume', 'day'}
The name of the column being loaded.
For output being read by the default BcolzOHLCVReader, data should be
stored in the following manner:
- Pricing columns (Open, High, Low, Close) should be stored as 1000 *
as-traded dollar value.
- Volume should be the as-traded volume.
- Dates should be stored as seconds since midnight UTC, Jan 1, 1970.
"""
raise NotImplementedError()
def write(self, filename, calendar, assets, show_progress=False):
"""
Parameters
----------
filename : str
The location at which we should write our output.
calendar : pandas.DatetimeIndex
Calendar to use to compute asset calendar offsets.
assets : pandas.Int64Index
The assets for which to write data.
show_progress : bool
Whether or not to show a progress bar while writing.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
_iterator = self.gen_tables(assets)
if show_progress:
pbar = progressbar(
_iterator,
length=len(assets),
item_show_func=lambda i: i if i is None else str(i[0]),
label="Merging asset files:",
)
with pbar as pbar_iterator:
return self._write_internal(filename, calendar, pbar_iterator)
return self._write_internal(filename, calendar, _iterator)
def _write_internal(self, filename, calendar, iterator):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(full((nrows,), asset_id))
continue
columns[column_name].append(
self.to_uint32(table[column_name][:], column_name)
)
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
            # Calculate the number of trading days between the first date
            # in the stored data and the first date of **this** asset. This
            # offset is used for output alignment by the reader.
# HACK: Index with a list so that we get back an array we can pass
# to self.to_uint32. We could try to extract this in the loop
# above, but that makes the logic a lot messier.
asset_first_day = self.to_uint32(table['day'][[0]], 'day')[0]
calendar_offset[asset_key] = calendar.get_loc(
Timestamp(asset_first_day, unit='s', tz='UTC'),
)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=filename,
mode='w',
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar'] = calendar.asi8.tolist()
return full_table
class DailyBarWriterFromCSVs(BcolzDailyBarWriter):
"""
BcolzDailyBarWriter constructed from a map from csvs to assets.
Parameters
----------
asset_map : dict
A map from asset_id -> path to csv with data for that asset.
CSVs should have the following columns:
day : datetime64
open : float64
high : float64
low : float64
close : float64
volume : int64
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, asset_map):
self._asset_map = asset_map
def gen_tables(self, assets):
"""
Read CSVs as DataFrames from our asset map.
"""
dtypes = self._csv_dtypes
for asset in assets:
path = self._asset_map.get(asset)
if path is None:
raise KeyError("No path supplied for asset %s" % asset)
data = read_csv(path, parse_dates=['day'], dtype=dtypes)
yield asset, ctable.fromdataframe(data)
def to_uint32(self, array, colname):
arrmax = array.max()
if colname in OHLC:
self.check_uint_safe(arrmax * 1000, colname)
return (array * 1000).astype(uint32)
elif colname == 'volume':
self.check_uint_safe(arrmax, colname)
return array.astype(uint32)
elif colname == 'day':
nanos_per_second = (1000 * 1000 * 1000)
self.check_uint_safe(arrmax.view(int) / nanos_per_second, colname)
return (array.view(int) / nanos_per_second).astype(uint32)
@staticmethod
def check_uint_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
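# Illustrative sketch (the CSV paths, asset ids and the calendar/assets inputs
# are assumptions of the example): driving the CSV-backed writer as described
# in its class docstring above.
def _example_write_daily_bars(calendar, assets):
    asset_map = {1: '/tmp/1.csv', 2: '/tmp/2.csv'}  # hypothetical CSV files
    writer = DailyBarWriterFromCSVs(asset_map)
    return writer.write(DAILY_US_EQUITY_PRICING_DEFAULT_FILENAME,
                        calendar, assets, show_progress=True)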
class BcolzDailyBarReader(object):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
A Bcolz CTable is comprised of Columns and Attributes.
Columns
-------
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
    The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the lengths of the asset blocks are not equal to each
    other. The blocks are clipped to the known start and end date of each asset
    to cut down on the number of empty values that would need to be included to
    make a regular/cubic dataset.
    When read across, the open, high, low, close, and volume values at the
    same index represent the same asset and day.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
calendar : list[int64]
Calendar used to compute offsets, in asi8 format (ns since EPOCH).
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
"""
def __init__(self, table):
if isinstance(table, string_types):
table = ctable(rootdir=table, mode='r')
self._table = table
self._calendar = DatetimeIndex(table.attrs['calendar'], tz='UTC')
self._first_rows = {
int(asset_id): start_index
for asset_id, start_index in iteritems(table.attrs['first_row'])
}
self._last_rows = {
int(asset_id): end_index
for asset_id, end_index in iteritems(table.attrs['last_row'])
}
self._calendar_offsets = {
int(id_): offset
for id_, offset in iteritems(table.attrs['calendar_offset'])
}
def _slice_locs(self, start_date, end_date):
try:
start = self._calendar.get_loc(start_date)
except KeyError:
if start_date < self._calendar[0]:
raise NoFurtherDataError(
msg=(
"FFC Query requesting data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(self._calendar[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
try:
stop = self._calendar.get_loc(end_date)
        except KeyError:
if end_date > self._calendar[-1]:
raise NoFurtherDataError(
msg=(
"FFC Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=self._calendar[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return start, stop
def _compute_slices(self, dates, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates.
Parameters
----------
dates : pandas.DatetimeIndex
Dates of the query on which we want to compute row indices.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
            of length len(dates) corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
start, stop = self._slice_locs(dates[0], dates[-1])
# Sanity check that the requested date range matches our calendar.
# This could be removed in the future if it's materially affecting
# performance.
query_dates = self._calendar[start:stop + 1]
if not array_equal(query_dates.values, dates.values):
raise ValueError("Incompatible calendars!")
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start,
stop,
assets,
)
def load_raw_arrays(self, columns, dates, assets):
first_rows, last_rows, offsets = self._compute_slices(dates, assets)
return _read_bcolz_data(
self._table,
(len(dates), len(assets)),
[column.name for column in columns],
first_rows,
last_rows,
offsets,
)
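# Illustrative sketch: reading a window of raw data back out of a table
# written above. The FFC column objects are not defined in this module, so a
# minimal stand-in exposing the only attribute load_raw_arrays touches
# (`.name`) is assumed here.
def _example_read_close(bcolz_path, dates, assets):
    from collections import namedtuple
    Column = namedtuple('Column', 'name')  # hypothetical stand-in column
    reader = BcolzDailyBarReader(bcolz_path)
    return reader.load_raw_arrays([Column('close')], dates, assets)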
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentWriter
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
SQLiteAdjustmentReader
"""
def __init__(self, conn_or_path, overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite and exists(conn_or_path):
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
def write_frame(self, tablename, frame):
if frozenset(frame.columns) != SQLITE_ADJUSTMENT_COLUMNS:
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
SQLITE_ADJUSTMENT_COLUMNS,
frame.columns.tolist(),
)
)
elif tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename, SQLITE_ADJUSTMENT_TABLENAMES
)
)
expected_dtypes = SQLITE_ADJUSTMENT_COLUMN_DTYPES
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column '{colname}', "
"but got {actual}.".format(
expected=expected,
colname=colname,
actual=actual,
)
)
return frame.to_sql(tablename, self.conn)
def write(self, splits, mergers, dividends):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame
Dataframe containing split data.
mergers : pandas.DataFrame
DataFrame containing merger data.
dividends : pandas.DataFrame
DataFrame containing dividend data.
Notes
-----
DataFrame input (`splits`, `mergers`, and `dividends`) should all have
the following columns:
effective_date : int
The date, represented as seconds since Unix epoch, on which the
adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
sid : int
The asset id associated with this adjustment.
The ratio column is interpreted as follows:
- For all adjustment types, multiply price fields ('open', 'high',
'low', and 'close') by the ratio.
- For **splits only**, **divide** volume by the adjustment ratio.
Dividend ratios should be calculated as
1.0 - (dividend_value / "close on day prior to dividend ex_date").
Returns
-------
None
See Also
--------
SQLiteAdjustmentReader : Consumer for the data written by this class
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_frame('dividends', dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
def close(self):
self.conn.close()
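# Illustrative worked example of the dividend ratio rule documented in `write`
# above: a 0.50 dividend against a 20.00 close on the day prior to the ex-date
# gives ratio = 1.0 - 0.50 / 20.00 = 0.975, so prices before the effective
# date are scaled by 0.975.
def _example_dividend_ratio(dividend_value, prior_close):
    return 1.0 - dividend_value / prior_close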
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
"""
def __init__(self, conn):
if isinstance(conn, str):
conn = sqlite3.connect(conn)
self.conn = conn
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
[column.name for column in columns],
dates,
assets,
)
class USEquityPricingLoader(FFCLoader):
"""
FFCLoader for US Equity Pricing
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
self.adjustments_loader = adjustments_loader
def load_adjusted_array(self, columns, mask):
dates, assets = mask.index, mask.columns
raw_arrays = self.raw_price_loader.load_raw_arrays(
columns,
dates,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
columns,
dates,
assets,
)
return [
adjusted_array(raw_array, mask.values, col_adjustments)
for raw_array, col_adjustments in zip(raw_arrays, adjustments)
]
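# Illustrative sketch (the adjustment database path is an assumption): wiring
# the pieces defined in this module together into a pricing loader, mirroring
# the constructor used by load_adjusted_array above.
def _example_build_loader():
    raw = BcolzDailyBarReader(DAILY_US_EQUITY_PRICING_DEFAULT_FILENAME)
    adjustments = SQLiteAdjustmentReader('adjustments.db')
    return USEquityPricingLoader(raw, adjustments)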
| apache-2.0 |
RoanFourie/ArchestrA-Tools | aaTools/ww-history-daily-max.py | 1 | 2991 | '''
Python v3.8 or later
Windows
DESCRIPTION: Get from Historian the daily maximum values (filtered between two time ranges during days) between two dates.
OWNER: Roan Fourie
REVISION: 0
REVISION DATE: 2020-08-17 (Week 34)
REVISION AUTHOR: Roan Fourie
REVISION DESCRIPTION:
    Edit the list of tags and the dates and times that data must be retrieved for.
    The script writes one CSV file per tag containing each day's maximum value.
USAGE:
NOTES:
'''
import pyodbc
import pandas as pd
import numpy as np
################################################################################
########################### EDIT START HERE #################################
################################################################################
tags = ['KDCE_GP2_00INS01_00IQWT320.FA_PV0',
'KDCE_GP2_00INS02_00IQWT320.FA_PV0',
'KDCE_GP2_00INS03_00IQWT320.FA_PV0'
]
server_name = 'HISTORIANSERVERNAME'
start_dt = '20200501 05:00:00.000' # Note the format: '20200501 05:00:00.000'
end_dt = '20200814 05:00:00.000' # Note the format: '20200814 05:00:00.000'
time_gt = '01:00' # The begin time ('01:00')
time_lt = '05:30' # The end time ('05:30')
# The time periods could be removed if you want to check the complete day.
# Remember to take it out of the SQL Query as well.
################################################################################
########################### EDIT END HERE #################################
################################################################################
# Make a SQL connection
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
f"Server={server_name};"
"Database=Runtime;"
"Trusted_Connection=yes;")
with cnxn: # using the with statement will automatically close the connections
cursor = cnxn.cursor()
for tag in tags:
# Query results from SQL into a Pandas Data Frame
df = pd.read_sql_query(f"SET QUOTED_IDENTIFIER OFF \
SELECT * FROM OPENQUERY(INSQL, \"SELECT DateTime = convert(nvarchar, DateTime, 21),Time = convert(char(5), DateTime, 108), [{tag}] \
FROM WideHistory \
WHERE wwRetrievalMode = 'Cyclic' \
AND wwResolution = 600000 \
AND wwQualityRule = 'Extended' \
AND wwVersion = 'Latest' \
AND DateTime >= '{start_dt}' \
AND DateTime <= '{end_dt}'\") \
where Time >= '{time_gt}' \
AND Time <= '{time_lt}'", cnxn)
# Convert DateTime string to date time object type
df['DateTime'] = pd.to_datetime(df['DateTime'])
# Get only the date without the time
df['DT'] = df['DateTime'].dt.date
# Group the dates together A-Z then select the one with the largest value in the group
# This will give the maximum for the day in a new data frame
df1 = df.groupby(['DT'], sort=False)[tag].max()
print(df1)
df1.to_csv(f'{tag}-results.csv', sep=",", encoding='utf-8')
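# Illustrative sketch (hand-made frame, not part of the original script): the
# same "daily maximum" reduction used above, shown on a tiny demo frame so the
# groupby('DT').max() step is easy to verify by eye.
def _demo_daily_max():
    demo = pd.DataFrame({'DT': ['2020-05-01', '2020-05-01', '2020-05-02'],
                         'value': [1.0, 3.5, 2.0]})
    return demo.groupby('DT', sort=False)['value'].max()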
| mit |
fmfn/UnbalancedDataset | imblearn/ensemble/tests/test_weight_boosting.py | 2 | 3494 | import pytest
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import assert_array_equal
from imblearn.ensemble import RUSBoostClassifier
@pytest.fixture
def imbalanced_dataset():
return make_classification(
n_samples=10000,
n_features=3,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=3,
n_clusters_per_class=1,
weights=[0.01, 0.05, 0.94],
class_sep=0.8,
random_state=0,
)
@pytest.mark.parametrize(
"boosting_params, err_msg",
[
({"n_estimators": "whatever"}, "n_estimators must be an integer"),
({"n_estimators": -100}, "n_estimators must be greater than zero"),
],
)
def test_rusboost_error(imbalanced_dataset, boosting_params, err_msg):
rusboost = RUSBoostClassifier(**boosting_params)
with pytest.raises(ValueError, match=err_msg):
rusboost.fit(*imbalanced_dataset)
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost(imbalanced_dataset, algorithm):
X, y = imbalanced_dataset
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=1
)
classes = np.unique(y)
n_estimators = 500
rusboost = RUSBoostClassifier(
n_estimators=n_estimators, algorithm=algorithm, random_state=0
)
rusboost.fit(X_train, y_train)
assert_array_equal(classes, rusboost.classes_)
# check that we have an ensemble of samplers and estimators with a
# consistent size
assert len(rusboost.estimators_) > 1
assert len(rusboost.estimators_) == len(rusboost.samplers_)
assert len(rusboost.pipelines_) == len(rusboost.samplers_)
# each sampler in the ensemble should have different random state
assert len({sampler.random_state for sampler in rusboost.samplers_}) == len(
rusboost.samplers_
)
# each estimator in the ensemble should have different random state
assert len({est.random_state for est in rusboost.estimators_}) == len(
rusboost.estimators_
)
# check the consistency of the feature importances
assert len(rusboost.feature_importances_) == imbalanced_dataset[0].shape[1]
    # check the consistency of the prediction outputs
y_pred = rusboost.predict_proba(X_test)
assert y_pred.shape[1] == len(classes)
assert rusboost.decision_function(X_test).shape[1] == len(classes)
score = rusboost.score(X_test, y_test)
assert score > 0.7, f"Failed with algorithm {algorithm} and score {score}"
y_pred = rusboost.predict(X_test)
assert y_pred.shape == y_test.shape
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost_sample_weight(imbalanced_dataset, algorithm):
X, y = imbalanced_dataset
sample_weight = np.ones_like(y)
rusboost = RUSBoostClassifier(algorithm=algorithm, random_state=0)
# Predictions should be the same when sample_weight are all ones
y_pred_sample_weight = rusboost.fit(X, y, sample_weight).predict(X)
y_pred_no_sample_weight = rusboost.fit(X, y).predict(X)
assert_array_equal(y_pred_sample_weight, y_pred_no_sample_weight)
rng = np.random.RandomState(42)
sample_weight = rng.rand(y.shape[0])
y_pred_sample_weight = rusboost.fit(X, y, sample_weight).predict(X)
with pytest.raises(AssertionError):
assert_array_equal(y_pred_no_sample_weight, y_pred_sample_weight)
| mit |
procoder317/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
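    # Tracks the regularized objective: mean logistic loss
    #   mean_i log(1 + exp(-y_i * (x_i . w + intercept)))
    # plus the L2 penalty ||w||^2 / (2 * C * n_samples), where C is the
    # inverse regularization strength used by scikit-learn.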
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
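# The labels are mapped to {-1, +1} because the margin y * (X w + b) used in
# get_loss expects signed labels rather than the {0, 1} encoding stored in
# the original target matrix.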
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
    # compute the same step_size as in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
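# For a CSC matrix, indptr[j + 1] - indptr[j] counts the stored non-zeros of
# column j (e.g. indptr == [0, 0, 2, 3] means columns with 0, 2 and 1 stored
# entries), so the argsort above keeps the 200 densest features before
# densifying.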
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
bottydim/detect-credit-card-fraud | ccfd_dnn/model_subsample.py | 1 | 13527 | import pandas as pd
import matplotlib
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import plotly.tools as tls
import pandas as pd
from sqlalchemy import create_engine # database connection
import datetime as dt
import io
import plotly.plotly as py # interactive graphing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Bar, Scatter, Marker, Layout
from heraspy.model import HeraModel
np.random.seed(1337)
import theano
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Input, Dense, GRU, LSTM, TimeDistributed, Masking,merge
from model import *
if __name__ == "__main__":
print "Commencing..."
data_dir = './data/'
evt_name = 'Featurespace_events_output.csv'
auth_name = 'Featurespace_auths_output.csv'
db_name = 'c1_agg.db'
disk_engine = create_engine('sqlite:///'+data_dir+db_name,convert_unicode=True)
disk_engine.raw_connection().connection.text_factory = str
####################################DATA SOURCE################################
# table = 'data_trim'
# rsl_file = './data/gs_results_trim.csv'
table = 'data_little'
rsl_file = './data/gs_results_little.csv'
# table = 'data_more'
# rsl_file = './data/gs_results_more.csv'
################################################################################
#######################Settings#############################################
samples_per_epoch = trans_num_table(table,disk_engine,mode='train',trans_mode='train')
samples_per_epoch = 1879
epoch_limit = samples_per_epoch
print "SAMPLES per epoch:",samples_per_epoch
user_sample_size = 1232
# user_sample_size = 2000
print "User sample size:",user_sample_size
# samples_per_epoch = 1959
# table = 'data_trim'
# samples_per_epoch = 485
nb_epoch = 30
lbl_pad_val = 2
pad_val = -1
hid_dims = [256,320]
num_l = [3,4]
lr_s = [2.5e-4]
# lr_s = [1e-2,1e-3,1e-4]
# lr_s = [1e-1,1e-2,1e-3]
num_opt = 1
opts = lambda x,lr:[keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08),
# keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
# keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
][x]
add_info = str(int(seq_len_param))+'subsample_class_w'
encoders = populate_encoders_scale(table,disk_engine)
gru_dict = {}
lstm_dict = {}
for hidden_dim in hid_dims:
# gru
for opt_id in range(num_opt):
for lr in lr_s:
optimizer = opts(opt_id,lr)
for num_layers in num_l:
for rnn in ['gru']:
title = 'Bidirectional_Loss'+'_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_'+str(type(optimizer).__name__)+'_'+str(lr)
print title
input_layer = Input(shape=(int(seq_len_param), 44),name='main_input')
mask = Masking(mask_value=0)(input_layer)
x = mask
for i in range(num_layers):
if rnn == 'gru':
prev_frw = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=False,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=0.0, dropout_U=0.0)(x)
prev_bck = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=True,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=0.0, dropout_U=0.0)(x)
else:
prev_frw = LSTM(hidden_dim, return_sequences=True,go_backwards=False,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=0.0, dropout_U=0.0)(x)
prev_bck = LSTM(hidden_dim, return_sequences=True,go_backwards=True,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=0.0, dropout_U=0.0)(x)
x = merge([prev_frw, prev_bck], mode='concat')
output_layer = TimeDistributed(Dense(3,activation='softmax'))(x)
model = Model(input=[input_layer],output=[output_layer])
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
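                        # Architecture sketch (descriptive only): masked,
                        # padded sequences of 44 features pass through a stack
                        # of forward and backward recurrent layers whose
                        # per-step outputs are concatenated, and a shared
                        # TimeDistributed softmax predicts one of 3 labels per
                        # transaction (the third class being the padding
                        # label, lbl_pad_val = 2).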
user_mode = 'train'
trans_mode = 'train'
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=400,usr_ratio=80,class_weight=None,lbl_pad_val = 2, pad_val = -1,
sub_sample=user_sample_size,epoch_size=epoch_limit)
# sub_sample=user_sample_size,epoch_size=samples_per_epoch)
history = model.fit_generator(data_gen, samples_per_epoch, nb_epoch, verbose=1, callbacks=[],validation_data=None, nb_val_samples=None, class_weight=None, max_q_size=10000)
py.sign_in('bottydim', 'o1kuyms9zv')
auc_list = []
print '#########################TRAIN STATS################'
user_mode = 'train'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=400,usr_ratio=80,class_weight=None,lbl_pad_val = 2, pad_val = -1)
eval_list = eval_auc_generator(model, data_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                        print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '##################EVALUATION USERS#########################'
user_mode = 'test'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=400,usr_ratio=80,class_weight=None,lbl_pad_val = 2, pad_val = -1)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                        print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Transactions#########################'
user_mode = 'train'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=400,usr_ratio=80,class_weight=None,lbl_pad_val = 2, pad_val = -1)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                        print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Pure#########################'
user_mode = 'test'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=400,usr_ratio=80,class_weight=None,lbl_pad_val = 2, pad_val = -1)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                        print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
with io.open(rsl_file, 'a', encoding='utf-8') as file:
auc_string = ','.join(auc_list)
title_csv = title.replace('_',',')+','+str(history.history['acc'][-1])+','+str(history.history['loss'][-1])+','+str(auc_val)+','+str(acc)+','+auc_string+'\n'
file.write(unicode(title_csv))
print 'logged'
trim_point = -15
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./figures/GS/'+table+'/'+title+'_'+table+'_'+add_info+".png")
# iplot(fig,filename='figures/'+title,image='png')
title = title.replace('Loss','Acc')
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['acc'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./figures/GS/'+table+'/'+title+'_'+table+'_'+add_info+".png")
| mit |
beni55/sympy | sympy/plotting/plot.py | 3 | 64516 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
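        # Shape note: ``points`` is reshaped to (N, 1, dim), so concatenating
        # points[:-1] with points[1:] along axis 1 gives an (N - 1, 2, dim)
        # array of (start, end) pairs -- the segment format expected by
        # matplotlib's LineCollection.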
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a
                #complex value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
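        # Illustrative behaviour: for a rapidly oscillating expression such as
        # sin(1/x) near x = 0 the collinearity test keeps failing, so the
        # recursion keeps subdividing until ``self.depth`` is reached, which
        # bounds the work at roughly 2**depth segments.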
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1],
                                   point_a, point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
        self.label = "(%s, %s, %s)" % (
            str(self.expr_x), str(self.expr_y), str(self.expr_z))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matploblib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
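# Usage sketch (illustrative): an already constructed Plot can be pointed at a
# specific backend before rendering, e.g.
#   p = plot(x**2, show=False)
#   p.backend = plot_backends['text']
#   p.show()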
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
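# Illustrative example: for array-valued points [0, 0], [1, 1], [2, 2] the two
# vectors from the middle point are anti-parallel (cos_theta == -1), so flat
# returns True; bending the middle point to [1, 1.1] drives |cos_theta + 1|
# above eps and makes it False.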
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
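# Illustrative example: two disjoint unit squares, [0, 1] x [0, 1] and
# [2, 3] x [2, 3], yield xlist = [0, 0, 1, 1, None, 2, 2, 3, 3, None] (with a
# matching ylist); the None separators make ``fill`` draw separate polygons
# instead of connecting the rectangles.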
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the plot. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` number of points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` number of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
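# Illustrative sketch (assumed usage): the sampling keywords documented above
# are forwarded to ``SurfaceOver2DRangeSeries`` through ``**kwargs``, so the
# mesh density of a surface plot can be controlled with e.g.
#
#     plot3d(x*y, (x, -5, 5), (y, -5, 5),
#            nb_of_points_x=50, nb_of_points_y=50)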
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` number of points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` number of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle the case of exactly three expressions here: it is
        # not possible to differentiate between expressions and ranges.
        # Series of plots with the same range.
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
seansu4you87/kupo | projects/MOOCs/udacity/drive/project-5-vehicle-detection/src/explore/hog_model.py | 1 | 5680 | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
# NOTE: the next import is only valid for scikit-learn version <= 0.17
# for scikit-learn >= 0.18 use:
# from sklearn.model_selection import train_test_split
from sklearn.cross_validation import train_test_split
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, cspace='RGB', orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if cspace != 'RGB':
if cspace == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
features.append(hog_features)
# Return list of feature vectors
return features
# Divide up into cars and notcars
cars = glob.glob("../resources/vehicles/**/*.png", recursive=True)
notcars = glob.glob("../resources/non-vehicles/**/*.png", recursive=True)
# Reduce the sample size because HOG features are slow to compute
# The quiz evaluator times out after 13s of CPU time
sample_size = 500
cars = cars[0:sample_size]
notcars = notcars[0:sample_size]
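# Illustrative sketch (not part of the original pipeline): to visualise the
# HOG representation of a single training image, get_hog_features() can be
# called with vis=True, which also returns a HOG image, e.g.
#
#     gray = cv2.cvtColor(mpimg.imread(cars[0]), cv2.COLOR_RGB2GRAY)
#     _, hog_img = get_hog_features(gray, 9, 8, 2, vis=True, feature_vec=False)
#     plt.imshow(hog_img, cmap='gray')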
### TODO: Tweak these parameters and see how the results change.
colorspace = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = 0 # Can be 0, 1, 2, or "ALL"
t=time.time()
car_features = extract_features(cars, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
notcar_features = extract_features(notcars, cspace=colorspace, orient=orient,
pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
hog_channel=hog_channel)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to extract HOG features...')
# Create an array stack of feature vectors
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t=time.time()
n_predict = 10
print('My SVC predicts: ', svc.predict(X_test[0:n_predict]))
print('For these',n_predict, 'labels: ', y_test[0:n_predict])
t2 = time.time()
print(round(t2-t, 5), 'Seconds to predict', n_predict,'labels with SVC') | mit |
ruthfranklin/hande | tools/plot_dynamics.py | 1 | 3047 | #!/usr/bin/env python
'''plot_dynamics.py file
Plot the population dynamics and energy profile of a HANDE QMC output file.'''
import argparse
import os
import pkgutil
import sys
import matplotlib.pyplot as pyplot
import numpy as np
import pandas as pd
if not pkgutil.find_loader('pyblock'):
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_script_dir, '../pyhande'))
import pyhande
def main(datafile, plotfile):
hande_out = pyhande.extract.extract_data(datafile)
for (metadata, data) in hande_out:
shoulder = pyhande.analysis.plateau_estimator(data)
# Plot the total population over the entire range
pyplot.subplot(3,1,1)
pyplot.plot(data['iterations'], data['# H psips'])
pyplot.xlabel('iteration')
pyplot.ylabel('Total Population')
# Plot the energy estimators over the entire range
pyplot.subplot(3,1,2)
pyplot.plot(data['iterations'], data['\sum H_0j N_j']/data['N_0'], label='Proj. Energy')
pyplot.plot(data['iterations'], data['Shift'], label='Shift')
pyplot.xlabel('iteration')
pyplot.ylabel('Energy / $E_{h}$')
pyplot.legend()
pyplot.subplot(3,1,3)
# Plot the total population up to the shoulder
height = shoulder['mean']['shoulder height']
data_around_shoulder = data[np.logical_and(data['# H psips'] < 1.1*height,
data['# H psips'] > 0.9*height) ]
pyplot.plot(data_around_shoulder['iterations'],
data_around_shoulder['# H psips'], label='Total Population')
x_points = [min(data_around_shoulder['iterations']),
max(data_around_shoulder['iterations'])]
pyplot.plot(x_points, [height, height], label='Shoulder Height')
pyplot.xlabel('iteration')
pyplot.ylabel('Population')
pyplot.legend(loc=2)
pyplot.draw()
if plotfile == '-':
pyplot.show()
else:
pyplot.savefig(plotfile)
# Also print out the information about the shoulder
# Stealing from reblock_hande.py
try:
float_fmt = '{0:-#.8e}'.format
float_fmt(1.0)
except ValueError:
# GAH. Alternate formatting only added to format function after
# python 2.6..
float_fmt = '{0:-.8e}'.format
print(shoulder.to_string(float_format=float_fmt, line_width=80))
def parse_args(args):
    parser = argparse.ArgumentParser(description='Plot the population and energy estimators of an FCIQMC/CCMC calculation')
parser.add_argument('-p', '--plotfile', default='-', help='File to save the graphs to. '
'The graphs are shown interactively if "-". Default: %(default)s')
parser.add_argument('file', help='File to plot.')
opts = parser.parse_args(args)
return (opts.file, opts.plotfile)
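# Example invocation (file names are placeholders):
#     python plot_dynamics.py hande.out               # plot interactively
#     python plot_dynamics.py hande.out -p plots.pdf  # save to file instead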
if __name__ == '__main__':
(datafile, plotfile) = parse_args(sys.argv[1:])
main(datafile, plotfile)
| lgpl-2.1 |
GGoussar/scikit-image | doc/examples/edges/plot_canny.py | 16 | 1603 | """
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
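# The hysteresis thresholds can also be set explicitly if the defaults are
# unsuitable (threshold values below are illustrative only), e.g.
#     edges_custom = feature.canny(im, sigma=3, low_threshold=0.05,
#                                  high_threshold=0.2)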
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
sharex=True, sharey=True)
ax1.imshow(im, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title('Canny filter, $\sigma=3$', fontsize=20)
fig.tight_layout()
plt.show()
| bsd-3-clause |
larsoner/mne-python | mne/viz/evoked.py | 3 | 106814 | # -*- coding: utf-8 -*-
"""Functions to plot evoked M/EEG data (besides topographies)."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from numbers import Integral
import numpy as np
from ..io.pick import (channel_type,
_VALID_CHANNEL_TYPES, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _pick_inst, _get_channel_types,
_PICK_TYPES_DATA_DICT, _picks_to_idx, pick_info)
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times, DraggableColorbar, _setup_cmap,
_setup_vmin_vmax, _check_cov, _make_combine_callable,
_validate_if_list_of_axes, _triage_rank_sss,
_connection_line, _get_color_list, _setup_ax_spines,
_setup_plot_projector, _prepare_joint_axes, _check_option,
_set_title_multiple_electrodes, _check_time_unit,
_plot_masked_image, _trim_ticks, _set_window_title)
from ..utils import (logger, _clean_names, warn, _pl, verbose, _validate_type,
_check_if_nan, _check_ch_locs, fill_doc, _is_numeric)
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topomap_plot, plot_topomap, _get_pos_outlines,
_draw_outlines, _prepare_topomap, _set_contour_locator,
_check_sphere, _make_head_outlines)
from ..channels.layout import _pair_grad_sensors, find_layout
def _butterfly_onpick(event, params):
"""Add a channel name on click."""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([
line is event.artist for line in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_zorder(len(ax.lines)) # to make sure it goes on top of the lines
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Only draw once for picking."""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _line_plot_onselect(xmin, xmax, ch_types, info, data, times, text=None,
psd=False, time_unit='s', sphere=None):
"""Draw topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type_ for type_ in ch_types if type_ in ('eeg', 'grad', 'mag')]
if len(ch_types) == 0:
raise ValueError('Interactive topomaps only allowed for EEG '
'and MEG channels.')
if ('grad' in ch_types and
len(_pair_grad_sensors(info, topomap_coords=False,
raise_error=False)) < 2):
ch_types.remove('grad')
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
vert_lines.append(ax.axvline(xmin, zorder=0, color='red'))
vert_lines.append(ax.axvline(xmax, zorder=0, color='red'))
fill = ax.axvspan(xmin, xmax, alpha=0.2, color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
minidx = np.abs(times - xmin).argmin()
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
if ch_type not in ('eeg', 'grad', 'mag'):
continue
picks, pos, merge_channels, _, ch_type, this_sphere, clip_origin = \
_prepare_topomap_plot(info, ch_type, sphere=sphere)
outlines = _make_head_outlines(this_sphere, pos, 'head', clip_origin)
if len(pos) < 2:
fig.delaxes(axarr[0][idx])
continue
this_data = data[picks, minidx:maxidx]
if merge_channels:
from ..channels.layout import _merge_ch_data
method = 'mean' if psd else 'rms'
this_data, _ = _merge_ch_data(this_data, ch_type, [],
method=method)
title = '%s %s' % (ch_type, method.upper())
else:
title = ch_type
this_data = np.average(this_data, axis=1)
axarr[0][idx].set_title(title)
vmin = min(this_data) if psd else None
vmax = max(this_data) if psd else None # All negative for dB psd.
cmap = 'Reds' if psd else None
plot_topomap(this_data, pos, cmap=cmap, vmin=vmin, vmax=vmax,
axes=axarr[0][idx], show=False, sphere=this_sphere,
outlines=outlines)
unit = 'Hz' if psd else time_unit
fig.suptitle('Average over %.2f%s - %.2f%s' % (xmin, unit, xmax, unit),
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Remove lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line)
ax.patches.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(x, y, z):
"""Transform x, y, z values into RGB colors."""
rgb = np.array([x, y, z]).T
rgb -= rgb.min(0)
rgb /= np.maximum(rgb.max(0), 1e-16) # avoid div by zero
return rgb
def _plot_legend(pos, colors, axis, bads, outlines, loc, size=30):
"""Plot (possibly colorized) channel legends for evoked plots."""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
axis.get_figure().canvas.draw()
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(size / ratio) + '%',
height=str(size) + '%', loc=loc)
ax.set_adjustable('box')
ax.set_aspect('equal')
_prepare_topomap(pos, ax, check_nonzero=False)
pos_x, pos_y = pos.T
ax.scatter(pos_x, pos_y, color=colors, s=size * .8, marker='.', zorder=1)
if bads:
bads = np.array(bads)
ax.scatter(pos_x[bads], pos_y[bads], s=size / 6, marker='.',
color='w', zorder=1)
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,
units, scalings, titles, axes, plot_type, cmap=None,
gfp=False, window_title=None, spatial_colors=False,
selectable=True, zorder='unsorted',
noise_cov=None, colorbar=True, mask=None, mask_style=None,
mask_cmap=None, mask_alpha=.25, time_unit='s',
show_names=False, group_by=None, sphere=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings).
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
# For evoked.plot_image ...
# First input checks for group_by and axes if any of them is not None.
# Either both must be dicts, or neither.
# If the former, the two dicts provide picks and axes to plot them to.
# Then, we call this function recursively for each entry in `group_by`.
if plot_type == "image" and isinstance(group_by, dict):
if axes is None:
axes = dict()
for sel in group_by:
plt.figure()
axes[sel] = plt.axes()
if not isinstance(axes, dict):
raise ValueError("If `group_by` is a dict, `axes` must be "
"a dict of axes or None.")
_validate_if_list_of_axes(list(axes.values()))
remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])
for sel in group_by: # ... we loop over selections
if sel not in axes:
raise ValueError(sel + " present in `group_by`, but not "
"found in `axes`")
ax = axes[sel]
# the unwieldy dict comp below defaults the title to the sel
titles = ({channel_type(evoked.info, idx): sel
for idx in group_by[sel]} if titles is None else titles)
_plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,
proj, xlim, hline, units, scalings, titles,
ax, plot_type, cmap=cmap, gfp=gfp,
window_title=window_title,
selectable=selectable, noise_cov=noise_cov,
colorbar=colorbar, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, time_unit=time_unit,
show_names=show_names,
sphere=sphere)
if remove_xlabels and not _is_last_row(ax):
ax.set_xticklabels([])
ax.set_xlabel("")
ims = [ax.images[0] for ax in axes.values()]
clims = np.array([im.get_clim() for im in ims])
min, max = clims.min(), clims.max()
for im in ims:
im.set_clim(min, max)
figs = [ax.get_figure() for ax in axes.values()]
if len(set(figs)) == 1:
return figs[0]
else:
return figs
elif isinstance(axes, dict):
raise ValueError("If `group_by` is not a dict, "
"`axes` must not be a dict either.")
time_unit, times = _check_time_unit(time_unit, evoked.times)
evoked = evoked.copy() # we modify info
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
if isinstance(gfp, str) and gfp != 'only':
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
picks = _picks_to_idx(info, picks, none='all', exclude=())
if len(picks) != len(set(picks)):
raise ValueError("`picks` are not unique. Please remove duplicates.")
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, str) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, str) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError(
'exclude has to be a list of channel names or "bads"')
picks = np.array([pick for pick in picks if pick not in exclude])
types = np.array(_get_channel_types(info, picks), str)
ch_types_used = list()
for this_type in _VALID_CHANNEL_TYPES:
if this_type in types:
ch_types_used.append(this_type)
fig = None
if axes is None:
fig, axes = plt.subplots(len(ch_types_used), 1)
fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,
hspace=0.63)
if isinstance(axes, plt.Axes):
axes = [axes]
fig.set_size_inches(6.4, 2 + len(axes))
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if window_title is not None:
_set_window_title(fig, window_title)
if len(axes) != len(ch_types_used):
raise ValueError('Number of axes (%g) must match number of channel '
'types (%d: %s)' % (len(axes), len(ch_types_used),
sorted(ch_types_used)))
_check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))
noise_cov = _check_cov(noise_cov, info)
if proj == 'reconstruct' and noise_cov is not None:
raise ValueError('Cannot use proj="reconstruct" when noise_cov is not '
'None')
projector, whitened_ch_names = _setup_plot_projector(
info, noise_cov, proj=proj is True, nave=evoked.nave)
if len(whitened_ch_names) > 0:
unit = False
if projector is not None:
evoked.data[:] = np.dot(projector, evoked.data)
if proj == 'reconstruct':
evoked = evoked._reconstruct_proj()
if plot_type == 'butterfly':
_plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,
units, scalings, hline, gfp, types, zorder, xlim, ylim,
times, bad_ch_idx, titles, ch_types_used, selectable,
False, line_alpha=1., nave=evoked.nave,
time_unit=time_unit, sphere=sphere)
plt.setp(axes, xlabel='Time (%s)' % time_unit)
elif plot_type == 'image':
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
use_nave = evoked.nave if ai == 0 else None
this_picks = list(picks[types == this_type])
_plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,
units, scalings, times, xlim, ylim, titles,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
nave=use_nave, time_unit=time_unit,
show_names=show_names, ch_names=evoked.ch_names)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')
fig.canvas.draw() # for axes plots update axes.
plt_show(show)
return fig
def _is_last_row(ax):
try:
return ax.get_subplotspec().is_last_row()
except AttributeError: # XXX old mpl
return ax.is_last_row()
def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units,
scalings, hline, gfp, types, zorder, xlim, ylim, times,
bad_ch_idx, titles, ch_types_used, selectable, psd,
line_alpha, nave, time_unit, sphere):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
sphere = _check_sphere(sphere, info)
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == 'grad' and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info('Need more than one channel to make '
'topography for %s. Disabling interactivity.'
% (this_type,))
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
else:
this_scaling = 1. if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = (isinstance(gfp, str) and gfp == 'only')
if not gfp_only:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
if spatial_colors is True and not _check_ch_locs(chs):
warn('Channel locations not available. Disabling spatial '
'colors.')
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd,
ax, sphere)
else:
if isinstance(spatial_colors, (tuple, str)):
col = [spatial_colors]
else:
col = ['k']
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
if zorder == 'std':
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == 'unsorted':
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = ('`zorder` must be a function, "std" '
'or "unsorted", not {0}.')
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(times, D[ch_idx], picker=True,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx], alpha=line_alpha,
linewidth=0.5)[0])
line_list[-1].set_pickradius(3.)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1.,
0.)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = ax.get_ylim() if (ylim is None or this_type not in
ylim.keys()) else ylim[this_type]
if gfp_only:
y_offset = 0.
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.2)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3, alpha=line_alpha)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
'GFP', zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
texts.append(ax.text(0, 0, '', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0,
clip_on=True))
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(title=r'%s (%d channel%s)'
% (titles[this_type], len(D), _pl(len(D))))
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = ('grey' if spatial_colors is True else 'r')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
callback_onselect = partial(_line_plot_onselect,
ch_types=ch_types_used, info=info,
data=data, times=times, text=text,
psd=psd, time_unit=time_unit,
sphere=sphere)
blit = False if plt.get_backend() == 'MacOSX' else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax, callback_onselect, 'horizontal', minspan=minspan,
useblit=blit, rectprops=dict(alpha=0.5, facecolor='red'))
def _add_nave(ax, nave):
"""Add nave to axes."""
if nave is not None:
ax.annotate(
r'N$_{\mathrm{ave}}$=%d' % nave, ha='left', va='bottom',
xy=(0, 1), xycoords='axes fraction',
xytext=(0, 5), textcoords='offset pixels')
def _handle_spatial_colors(colors, info, idx, ch_type, psd, ax, sphere):
"""Set up spatial colors."""
used_nm = np.array(_clean_names(info['ch_names']))[idx]
# find indices for bads
bads = [np.where(used_nm == bad)[0][0] for bad in info['bads'] if bad in
used_nm]
pos, outlines = _get_pos_outlines(info, idx, sphere=sphere)
loc = 1 if psd else 2 # Legend in top right for psd plot.
_plot_legend(pos, colors, ax, bads, outlines, loc)
def _plot_image(data, ax, this_type, picks, cmap, unit, units, scalings, times,
xlim, ylim, titles, colorbar=True, mask=None, mask_cmap=None,
mask_style=None, mask_alpha=.25, nave=None,
time_unit='s', show_names=False, ch_names=None):
"""Plot images."""
import matplotlib.pyplot as plt
assert time_unit is not None
if show_names == "auto":
if picks is not None:
show_names = "all" if len(picks) < 25 else True
else:
show_names = False
cmap = _setup_cmap(cmap)
ch_unit = units[this_type]
this_scaling = scalings[this_type]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
if picks is not None:
data = data[picks]
if mask is not None:
mask = mask[picks]
# Show the image
# Set amplitude scaling
data = this_scaling * data
if ylim is None or this_type not in ylim:
vmax = np.abs(data).max()
vmin = -vmax
else:
vmin, vmax = ylim[this_type]
_check_if_nan(data)
im, t_end = _plot_masked_image(
ax, data, times, mask, yvals=None, cmap=cmap[0],
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap)
# ignore xlim='tight'; happens automatically with `extent` in imshow
xlim = None if xlim == 'tight' else xlim
if xlim is not None:
ax.set_xlim(xlim)
if colorbar:
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ylabel = 'Channels' if show_names else 'Channel (index)'
t = titles[this_type] + ' (%d channel%s' % (len(data), _pl(data)) + t_end
ax.set(ylabel=ylabel, xlabel='Time (%s)' % (time_unit,), title=t)
_add_nave(ax, nave)
yticks = np.arange(len(picks))
if show_names != 'all':
yticks = np.intersect1d(np.round(ax.get_yticks()).astype(int), yticks)
yticklabels = np.array(ch_names)[picks] if show_names else np.array(picks)
ax.set(yticks=yticks, yticklabels=yticklabels[yticks])
@verbose
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
window_title=None, spatial_colors=False, zorder='unsorted',
selectable=True, noise_cov=None, time_unit='s', sphere=None,
verbose=None):
"""Plot evoked data using butterfly plots.
Left click to a line shows the channel name. Selecting an area by clicking
and holding left mouse button plots a topographic map of the painted area.
.. note:: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
Y limits for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
%(plot_proj)s
hline : list of float | None
The values at which to show an horizontal line.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot GFP in green if True or "only". If "only", then the individual
channel traces will not be shown.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If False, the good
channels are plotted black and bad channels red. Defaults to False.
zorder : str | callable
Which channels to put in the front or back. Only matters if
``spatial_colors`` is used.
If str, must be ``std`` or ``unsorted`` (defaults to ``unsorted``). If
``std``, data with the lowest standard deviation (weakest effects) will
be put in front so that they are not obscured by those with stronger
effects. If ``unsorted``, channels are z-sorted as in the evoked
instance.
If callable, must take one argument: a numpy array of the same
dimensionality as the evoked raw data; and return a list of
unique integers corresponding to the number of channels.
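        A callable equivalent of ``'std'``, for instance, is
        ``zorder=lambda data: np.argsort(np.std(data, axis=1))``
        (assuming ``numpy`` is imported as ``np``).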
.. versionadded:: 0.13.0
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
and is useful when using ``axes`` parameter to draw multiaxes figures.
.. versionadded:: 0.13.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
See Also
--------
mne.viz.plot_evoked_white
"""
return _plot_evoked(
evoked=evoked, picks=picks, exclude=exclude, unit=unit, show=show,
ylim=ylim, proj=proj, xlim=xlim, hline=hline, units=units,
scalings=scalings, titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title, spatial_colors=spatial_colors,
selectable=selectable, zorder=zorder, noise_cov=noise_cov,
time_unit=time_unit, sphere=sphere)
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_background=None,
merge_grads=False, legend=True, axes=None,
background_color='w', noise_cov=None, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color | color | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
Y limits for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel is determined by
the maximum absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of float | None
The values at which to show a vertical line.
fig_background : None | ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | str | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
background_color : color
Background color. Typically 'k' (black) or 'w' (white; default).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channel names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations.
"""
from matplotlib.colors import colorConverter
if not type(evoked) in (tuple, list):
evoked = [evoked]
dark_background = \
np.mean(colorConverter.to_rgb(background_color)) < 0.5
if dark_background:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'w'
else:
fig_facecolor = background_color
axis_facecolor = background_color
font_color = 'k'
if color is None:
if dark_background:
color = ['w'] + _get_color_list()
else:
color = _get_color_list()
color = color * ((len(evoked) % len(color)) + 1)
color = color[:len(evoked)]
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color,
merge_channels=merge_grads,
legend=legend, axes=axes, show=show,
noise_cov=noise_cov)
@fill_doc
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True,
show=True, clim=None, xlim='tight', proj=False,
units=None, scalings=None, titles=None, axes=None,
cmap='RdBu_r', colorbar=True, mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=.25,
time_unit='s', show_names="auto", group_by=None,
sphere=None):
"""Plot evoked data as images.
Parameters
----------
evoked : instance of Evoked
The evoked data.
%(picks_all)s
This parameter can also be used to set the order the channels
are shown in, as the channel image is sorted by the order of picks.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
Color limits for plots (after scaling has been applied). e.g.
``clim = dict(eeg=[-20, 20])``.
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
X limits for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
titles : dict | None
The titles associated with the channels. If None, defaults to
``dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')``.
axes : instance of Axes | list | dict | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
If ``group_by`` is a dict, this cannot be a list, but it can be a dict
of lists of axes, with the keys matching those of ``group_by``. In that
case, the provided axes will be used for the corresponding groups.
Defaults to ``None``.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ``('RdBu_r', True)``.
Defaults to ``'RdBu_r'``.
colorbar : bool
If True, plot a colorbar. Defaults to True.
.. versionadded:: 0.16
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to ``False`` in the mask are masked (see
``do_mask`` below). Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if 'contour', a contour line is drawn around
the masked areas (``True`` in ``mask``). If 'mask', entries not
``True`` in ``mask`` are shown transparently. If 'both', both a contour
and transparency are used.
If ``None``, defaults to 'both' if ``mask`` is not None, and is ignored
otherwise.
.. versionadded:: 0.16
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
``Greys``. Not interactive. Otherwise, as ``cmap``.
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to .25.
.. versionadded:: 0.16
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
show_names : bool | 'auto' | 'all'
Determines if channel names should be plotted on the y axis. If False,
no names are shown. If True, ticks are set automatically by matplotlib
and the corresponding channel names are shown. If "all", all channel
names are shown. If "auto", is set to False if ``picks`` is ``None``,
to ``True`` if ``picks`` contains 25 or more entries, or to "all"
if ``picks`` contains fewer than 25 entries.
group_by : None | dict
If a dict, the values must be picks, and ``axes`` must also be a dict
with matching keys, or None. If ``axes`` is None, one figure and one
        axis will be created for each entry in ``group_by``. Then, for each
entry, the picked channels will be plotted to the corresponding axis.
If ``titles`` are None, keys will become plot titles. This is useful
for e.g. ROIs. Each entry must contain only one channel type.
For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
If None, all picked channels are plotted to the same axis.
%(topomap_sphere_auto)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim, hline=None,
units=units, scalings=scalings, titles=titles,
axes=axes, plot_type="image", cmap=cmap,
colorbar=colorbar, mask=mask, mask_style=mask_style,
mask_cmap=mask_cmap, mask_alpha=mask_alpha,
time_unit=time_unit, show_names=show_names,
group_by=group_by, sphere=sphere)
def _plot_update_evoked(params, bools):
"""Update the plot evoked lines."""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_ydata(di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
@verbose
def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s',
sphere=None, axes=None, verbose=None):
"""Plot whitened evoked response.
Plots the whitened evoked response and the whitened GFP as described in
[1]_. This function is especially useful for investigating noise
covariance properties to determine if data are properly whitened (e.g.,
achieving expected values in line with model assumptions, see Notes below).
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance. Can be a string to load a covariance from disk.
show : bool
Show figure if True.
%(rank_None)s
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
%(topomap_sphere_auto)s
axes : list | None
List of axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
See Also
--------
mne.Evoked.plot
Notes
-----
If baseline signals match the assumption of Gaussian white noise,
values should be centered at 0, and be within 2 standard deviations
(±1.96) for 95%% of the time points. For the global field power (GFP),
we expect it to fluctuate around a value of 1.
If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
that has the highest log-likelihood. The left column will depict the
whitened GFPs based on each estimator separately for each sensor type.
Instead of numbers of channels the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger
(if ``verbose=True``) for each noise covariance estimator that is passed.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
"""
from ..cov import whiten_evoked, read_cov # recursive import
import matplotlib.pyplot as plt
time_unit, times = _check_time_unit(time_unit, evoked.times)
if isinstance(noise_cov, str):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
evoked = evoked.copy() # handle ref meg
passive_idx = [idx for idx, proj in enumerate(evoked.info['projs'])
if not proj['active']]
# either applied already or not-- else issue
for idx in passive_idx[::-1]: # reverse order so idx does not change
evoked.del_proj(idx)
evoked.pick_types(ref_meg=False, exclude='bads', **_PICK_TYPES_DATA_DICT)
n_ch_used, rank_list, picks_list, has_sss = _triage_rank_sss(
evoked.info, noise_cov, rank, scalings=None)
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
# get one whitened evoked per cov
evokeds_white = [whiten_evoked(evoked, cov, picks=None, rank=r)
for cov, r in zip(noise_cov, rank_list)]
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power.
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
want_shape = (n_rows, n_columns) if len(noise_cov) > 1 else (n_rows,)
_validate_type(axes, (list, tuple, np.ndarray, None), 'axes')
if axes is None:
_, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
else:
axes = np.array(axes)
for ai, ax in enumerate(axes.flat):
_validate_type(ax, plt.Axes, 'axes.flat[%d]' % (ai,))
if axes.shape != want_shape:
raise ValueError(f'axes must have shape {want_shape}, got '
f'{axes.shape}')
fig = axes.flat[0].figure
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
if any(((n_columns == 1 and n_ch_used >= 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = _handle_default('color', None)
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
    # the first is by construction the best noise cov; we plot that one on the left.
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False,
time_unit=time_unit)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k',
lw=0.5)
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--', lw=2)
ax.set(title='%s (%d channel%s)'
% (titles_[ch_type], len(picks), _pl(len(picks))))
# Now plot the GFP for all covs if indicated.
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax = ax_gfp[i]
ax.set_title(title if n_columns > 1 else
'Whitened GFP, method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
# Wrap SSS-processed data (MEG) to the mag color
color_ch = 'mag' if ch == 'meg' else ch
ax.plot(times, gfp,
label=label if n_columns > 1 else title,
color=color if n_columns > 1 else ch_colors[color_ch],
lw=0.5)
ax.set(xlabel='Time (%s)' % (time_unit,), ylabel=r'GFP ($\chi^2$)',
xlim=[times[0], times[-1]], ylim=(0, 10))
ax.axhline(1, color='red', linestyle='--', lw=2.)
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
plt_show(show)
return fig
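# Hypothetical usage sketch for plot_evoked_white (file names and variables
# below are placeholders, not defined in this module):
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     noise_covs = mne.compute_covariance(epochs, method=['shrunk', 'empirical'],
#                                         return_estimators=True)
#     plot_evoked_white(evoked, noise_covs)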
@verbose
def plot_snr_estimate(evoked, inv, show=True, axes=None, verbose=None):
"""Plot a data SNR estimate.
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
axes : instance of Axes | None
The axes to plot into.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
    The bluish green line ('Inverse') is the SNR estimated from the mismatch
    between the data and the data re-estimated from the regularized inverse.
    The orange line ('Whitened GFP') is the SNR determined by the GFP of the
    whitened evoked data.
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv)
_validate_type(axes, (None, plt.Axes))
if axes is None:
_, ax = plt.subplots(1, 1)
else:
ax = axes
del axes
fig = ax.figure
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.axvline(0, color='k', ls=':', lw=1)
ax.axhline(0, color='k', ls=':', lw=1)
# Colors are "bluish green" and "vermilion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
hs = list()
labels = ('Inverse', 'Whitened GFP')
hs.append(ax.plot(
evoked.times, snr_est, color=[0.0, 0.6, 0.5])[0])
hs.append(ax.plot(
evoked.times, snr - 1, color=[0.8, 0.4, 0.0])[0])
ax.set(xlim=lims[:2], ylim=lims[2:], ylabel='SNR',
xlabel='Time (s)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
ax.legend(hs, labels, title='Estimation method')
plt_show(show)
return fig
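# Hypothetical usage sketch for plot_snr_estimate (names are placeholders):
#     from mne.minimum_norm import read_inverse_operator
#     inv = read_inverse_operator('sample-inv.fif')
#     plot_snr_estimate(evoked, inv)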
@fill_doc
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
exclude=None, show=True, ts_args=None,
topomap_args=None):
"""Plot evoked data as butterfly plot and add topomaps for time points.
.. note:: Axes to plot in can be passed by the user through ``ts_args`` or
``topomap_args``. In that case both ``ts_args`` and
``topomap_args`` axes have to be used. Be aware that when the
axes are provided, their position may be slightly modified.
Parameters
----------
evoked : instance of Evoked
The evoked instance.
times : float | array of float | "auto" | "peaks"
The time point(s) to plot. If ``"auto"``, 5 evenly spaced topographies
between the first and last time instant will be shown. If ``"peaks"``,
finds time points automatically by checking for 3 local maxima in
Global Field Power. Defaults to ``"peaks"``.
title : str | None
The title. If ``None``, suppress printing channel type title. If an
empty string, a default title is created. Defaults to ''. If custom
axes are passed make sure to set ``title=None``, otherwise some of your
axes may be removed during placement of the title axis.
%(picks_all)s
exclude : None | list of str | 'bads'
Channels names to exclude from being shown. If ``'bads'``, the
bad channels are excluded. Defaults to ``None``.
show : bool
Show figure if ``True``. Defaults to ``True``.
ts_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`mne.Evoked.plot` to
style the butterfly plot. If they are not in this dict, the following
defaults are passed: ``spatial_colors=True``, ``zorder='std'``.
``show`` and ``exclude`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:meth:`mne.Evoked.plot_topomap` to style the topomaps.
If it is not in this dict, ``outlines='skirt'`` will be passed.
``show``, ``times``, ``colorbar`` are illegal.
If ``None``, no customizable arguments will be passed.
Defaults to ``None``.
Returns
-------
fig : instance of matplotlib.figure.Figure | list
The figure object containing the plot. If ``evoked`` has multiple
channel types, a list of figures, one for each channel type, is
returned.
Notes
-----
.. versionadded:: 0.12.0
"""
import matplotlib.pyplot as plt
if ts_args is not None and not isinstance(ts_args, dict):
raise TypeError('ts_args must be dict or None, got type %s'
% (type(ts_args),))
ts_args = dict() if ts_args is None else ts_args.copy()
ts_args['time_unit'], _ = _check_time_unit(
ts_args.get('time_unit', 's'), evoked.times)
topomap_args = dict() if topomap_args is None else topomap_args.copy()
got_axes = False
illegal_args = {"show", 'times', 'exclude'}
for args in (ts_args, topomap_args):
if any((x in args for x in illegal_args)):
raise ValueError("Don't pass any of {} as *_args.".format(
", ".join(list(illegal_args))))
if ("axes" in ts_args) or ("axes" in topomap_args):
if not (("axes" in ts_args) and ("axes" in topomap_args)):
raise ValueError("If one of `ts_args` and `topomap_args` contains "
"'axes', the other must, too.")
_validate_if_list_of_axes([ts_args["axes"]], 1)
n_topomaps = (3 if times is None else len(times)) + 1
_validate_if_list_of_axes(list(topomap_args["axes"]), n_topomaps)
got_axes = True
# channel selection
# simply create a new evoked object with the desired channel selection
# Need to deal with proj before picking to avoid bad projections
proj = topomap_args.get('proj', True)
proj_ts = ts_args.get('proj', True)
if proj_ts != proj:
raise ValueError(
f'topomap_args["proj"] (default True, got {proj}) must match '
f'ts_args["proj"] (default True, got {proj_ts})')
_check_option('topomap_args["proj"]', proj, (True, False, 'reconstruct'))
evoked = evoked.copy()
if proj:
evoked.apply_proj()
if proj == 'reconstruct':
evoked._reconstruct_proj()
topomap_args['proj'] = ts_args['proj'] = False # don't reapply
evoked = _pick_inst(evoked, picks, exclude, copy=False)
info = evoked.info
ch_types = _get_channel_types(info, unique=True, only_data_chs=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
if got_axes:
raise NotImplementedError(
"Currently, passing axes manually (via `ts_args` or "
"`topomap_args`) is not supported for multiple channel types.")
figs = list()
for this_type in ch_types: # pick only the corresponding channel type
ev_ = evoked.copy().pick_channels(
[info['ch_names'][idx] for idx in range(info['nchan'])
if channel_type(info, idx) == this_type])
if len(_get_channel_types(ev_.info, unique=True)) > 1:
raise RuntimeError('Possibly infinite loop due to channel '
'selection problem. This should never '
'happen! Please check your channel types.')
figs.append(
plot_evoked_joint(
ev_, times=times, title=title, show=show, ts_args=ts_args,
exclude=list(), topomap_args=topomap_args))
return figs
# set up time points to show topomaps for
times_sec = _process_times(evoked, times, few=True)
del times
_, times_ts = _check_time_unit(ts_args['time_unit'], times_sec)
# prepare axes for topomap
if not got_axes:
fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes(len(times_sec),
figsize=(8.0, 4.2))
else:
ts_ax = ts_args["axes"]
del ts_args["axes"]
map_ax = topomap_args["axes"][:-1]
cbar_ax = topomap_args["axes"][-1]
del topomap_args["axes"]
fig = cbar_ax.figure
# butterfly/time series plot
# most of this code is about passing defaults on demand
ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
proj=False, hline=None, units=None, scalings=None,
titles=None, gfp=False, window_title=None,
spatial_colors=True, zorder='std',
sphere=None)
ts_args_def.update(ts_args)
_plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
exclude=[], **ts_args_def)
# handle title
# we use a new axis for the title to handle scaling of plots
old_title = ts_ax.get_title()
ts_ax.set_title('')
if title is not None:
title_ax = plt.subplot(4, 3, 2)
if title == '':
title = old_title
title_ax.text(.5, .5, title, transform=title_ax.transAxes,
horizontalalignment='center',
verticalalignment='center')
title_ax.axis('off')
# topomap
contours = topomap_args.get('contours', 6)
ch_type = ch_types.pop() # set should only contain one element
# Since the data has all the ch_types, we get the limits from the plot.
vmin, vmax = ts_ax.get_ylim()
norm = ch_type == 'grad'
vmin = 0 if norm else vmin
vmin, vmax = _setup_vmin_vmax(evoked.data, vmin, vmax, norm)
if not isinstance(contours, (list, np.ndarray)):
locator, contours = _set_contour_locator(vmin, vmax, contours)
else:
locator = None
topomap_args_pass = topomap_args.copy()
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass['contours'] = contours
evoked.plot_topomap(times=times_sec, axes=map_ax, show=False,
colorbar=False, **topomap_args_pass)
if topomap_args.get('colorbar', True):
from matplotlib import ticker
cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
if isinstance(contours, (list, np.ndarray)):
cbar.set_ticks(contours)
else:
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
if not got_axes:
plt.subplots_adjust(left=.1, right=.93, bottom=.14,
top=1. if title is not None else 1.2)
# connection lines
# draw the connection lines between time series and topoplots
lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
for timepoint, map_ax_ in zip(times_ts, map_ax)]
for line in lines:
fig.lines.append(line)
# mark times in time series plot
for timepoint in times_ts:
ts_ax.axvline(timepoint, color='grey', linestyle='-',
linewidth=1.5, alpha=.66, zorder=0)
# show and return it
plt_show(show)
return fig
###############################################################################
# The following functions are all helpers for plot_compare_evokeds. #
###############################################################################
def _check_loc_legal(loc, what='your choice', default=1):
"""Check if loc is a legal location for MPL subordinate axes."""
true_default = {"legend": 2, "show_sensors": 1}.get(what, default)
if isinstance(loc, (bool, np.bool_)) and loc:
loc = true_default
loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3,
'lower right': 4, 'right': 5, 'center left': 6,
'center right': 7, 'lower center': 8, 'upper center': 9,
'center': 10}
loc_ = loc_dict.get(loc, loc)
if loc_ not in range(11):
        raise ValueError(str(loc) + " is not a legal MPL loc, please supply "
                         "another value for " + what + ".")
return loc_
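# For example (derived from the mapping above): _check_loc_legal('upper right',
# 'legend') returns 1, while _check_loc_legal(True, 'legend') returns 2, the
# default location for the legend.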
def _validate_style_keys_pce(styles, conditions, tags):
"""Validate styles dict keys for plot_compare_evokeds."""
styles = deepcopy(styles)
if not set(styles).issubset(tags.union(conditions)):
raise ValueError('The keys in "styles" ({}) must match the keys in '
'"evokeds" ({}).'.format(list(styles), conditions))
# make sure all the keys are in there
for cond in conditions:
if cond not in styles:
styles[cond] = dict()
# deal with matplotlib's synonymous handling of "c" and "color" /
# "ls" and "linestyle" / "lw" and "linewidth"
elif 'c' in styles[cond]:
styles[cond]['color'] = styles[cond].pop('c')
elif 'ls' in styles[cond]:
styles[cond]['linestyle'] = styles[cond].pop('ls')
elif 'lw' in styles[cond]:
styles[cond]['linewidth'] = styles[cond].pop('lw')
# transfer styles from partial-matched entries
for tag in cond.split('/'):
if tag in styles:
styles[cond].update(styles[tag])
# remove the (now transferred) partial-matching style entries
for key in list(styles):
if key not in conditions:
del styles[key]
return styles
def _validate_colors_pce(colors, cmap, conditions, tags):
"""Check and assign colors for plot_compare_evokeds."""
err_suffix = ''
if colors is None:
if cmap is None:
colors = _get_color_list()
err_suffix = ' in the default color cycle'
else:
colors = list(range(len(conditions)))
# convert color list to dict
if isinstance(colors, (list, tuple, np.ndarray)):
if len(conditions) > len(colors):
raise ValueError('Trying to plot {} conditions, but there are only'
' {} colors{}. Please specify colors manually.'
.format(len(conditions), len(colors), err_suffix))
colors = dict(zip(conditions, colors))
# should be a dict by now...
if not isinstance(colors, dict):
raise TypeError('"colors" must be a dict, list, or None; got {}.'
.format(type(colors).__name__))
# validate color dict keys
if not set(colors).issubset(tags.union(conditions)):
raise ValueError('If "colors" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(colors), conditions))
# validate color dict values
color_vals = list(colors.values())
all_numeric = all(_is_numeric(_color) for _color in color_vals)
if cmap is not None and not all_numeric:
raise TypeError('if "cmap" is specified, then "colors" must be '
'None or a (list or dict) of (ints or floats); got {}.'
.format(', '.join(color_vals)))
# convert provided ints to sequential, rank-ordered ints
all_int = all([isinstance(_color, Integral) for _color in color_vals])
if all_int:
colors = deepcopy(colors)
ranks = {val: ix for ix, val in enumerate(sorted(set(color_vals)))}
for key, orig_int in colors.items():
colors[key] = ranks[orig_int]
# if no cmap, convert color ints to real colors
if cmap is None:
color_list = _get_color_list()
for cond, color_int in colors.items():
colors[cond] = color_list[color_int]
# recompute color_vals as a sorted set (we'll need it that way later)
color_vals = set(colors.values())
if all_numeric:
color_vals = sorted(color_vals)
return colors, color_vals
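# Worked example of the integer handling above (illustrative only): with
# cmap=None and colors={'aud': 3, 'vis': 1}, the integers are rank-ordered to
# {'aud': 1, 'vis': 0} and then replaced by entries of the default color
# cycle, so 'vis' gets the first cycle color and 'aud' the second.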
def _validate_cmap_pce(cmap, colors, color_vals):
"""Check and assign colormap for plot_compare_evokeds."""
from matplotlib.cm import get_cmap
from matplotlib.colors import Colormap
all_int = all([isinstance(_color, Integral) for _color in color_vals])
lut = len(color_vals) if all_int else None
colorbar_title = ''
if isinstance(cmap, (list, tuple, np.ndarray)) and len(cmap) == 2:
colorbar_title, cmap = cmap
if isinstance(cmap, str):
cmap = get_cmap(cmap, lut=lut)
elif isinstance(cmap, Colormap) and all_int:
cmap = cmap._resample(lut)
return cmap, colorbar_title
def _validate_linestyles_pce(linestyles, conditions, tags):
"""Check and assign linestyles for plot_compare_evokeds."""
# make linestyles a list if it's not defined
if linestyles is None:
linestyles = [None] * len(conditions) # will get changed to defaults
# convert linestyle list to dict
if isinstance(linestyles, (list, tuple, np.ndarray)):
if len(conditions) > len(linestyles):
raise ValueError('Trying to plot {} conditions, but there are '
'only {} linestyles. Please specify linestyles '
'manually.'
.format(len(conditions), len(linestyles)))
linestyles = dict(zip(conditions, linestyles))
# should be a dict by now...
if not isinstance(linestyles, dict):
raise TypeError('"linestyles" must be a dict, list, or None; got {}.'
.format(type(linestyles).__name__))
# validate linestyle dict keys
if not set(linestyles).issubset(tags.union(conditions)):
raise ValueError('If "linestyles" is a dict its keys ({}) must '
'match the keys/conditions in "evokeds" ({}).'
.format(list(linestyles), conditions))
# normalize linestyle values (so we can accurately count unique linestyles
# later). See https://github.com/matplotlib/matplotlib/blob/master/matplotlibrc.template#L131-L133 # noqa
linestyle_map = {'solid': (0, ()),
'dotted': (0, (1., 1.65)),
'dashed': (0, (3.7, 1.6)),
'dashdot': (0, (6.4, 1.6, 1., 1.6)),
'-': (0, ()),
':': (0, (1., 1.65)),
'--': (0, (3.7, 1.6)),
'-.': (0, (6.4, 1.6, 1., 1.6))}
for cond, _ls in linestyles.items():
linestyles[cond] = linestyle_map.get(_ls, _ls)
return linestyles
def _populate_style_dict_pce(condition, condition_styles, style_name,
style_dict, cmap):
"""Transfer styles into condition_styles dict for plot_compare_evokeds."""
defaults = dict(color='gray', linestyle=(0, ())) # (0, ()) == 'solid'
# if condition X doesn't yet have style Y defined:
if condition_styles.get(style_name, None) is None:
# check the style dict for the full condition name
try:
condition_styles[style_name] = style_dict[condition]
# if it's not in there, try the slash-separated condition tags
except KeyError:
for tag in condition.split('/'):
try:
condition_styles[style_name] = style_dict[tag]
# if the tag's not in there, assign a default value (but also
# continue looping in search of a tag that *is* in there)
except KeyError:
condition_styles[style_name] = defaults[style_name]
# if we found a valid tag, keep track of it for colorbar
# legend purposes, and also stop looping (so we don't overwrite
# a valid tag's style with an invalid tag → default style)
else:
if style_name == 'color' and cmap is not None:
condition_styles['cmap_label'] = tag
break
return condition_styles
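# Worked example (illustrative only): with condition='Aud/L' and
# style_dict={'Aud': 'r'} for style_name='color', the full name 'Aud/L' is not
# a key, but the tag 'Aud' is, so condition_styles['color'] becomes 'r'; if no
# tag matched, the default ('gray' for color, solid for linestyle) is used.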
def _handle_styles_pce(styles, linestyles, colors, cmap, conditions):
"""Check and assign styles for plot_compare_evokeds."""
styles = deepcopy(styles)
# validate style dict structure (doesn't check/assign values yet)
tags = set(tag for cond in conditions for tag in cond.split('/'))
if styles is None:
styles = {cond: dict() for cond in conditions}
styles = _validate_style_keys_pce(styles, conditions, tags)
# validate color dict
colors, color_vals = _validate_colors_pce(colors, cmap, conditions, tags)
all_int = all([isinstance(_color, Integral) for _color in color_vals])
# instantiate cmap
cmap, colorbar_title = _validate_cmap_pce(cmap, colors, color_vals)
# validate linestyles
linestyles = _validate_linestyles_pce(linestyles, conditions, tags)
# prep for colorbar tick handling
colorbar_ticks = None if cmap is None else dict()
# array mapping color integers (indices) to tick locations (array values)
tick_locs = np.linspace(0, 1, 2 * len(color_vals) + 1)[1::2]
# transfer colors/linestyles dicts into styles dict; fall back on defaults
color_and_linestyle = dict(color=colors, linestyle=linestyles)
for cond, cond_styles in styles.items():
for _name, _style in color_and_linestyle.items():
cond_styles = _populate_style_dict_pce(cond, cond_styles, _name,
_style, cmap)
# convert numeric colors into cmap color values; store colorbar ticks
if cmap is not None:
color_number = cond_styles['color']
cond_styles['color'] = cmap(color_number)
tick_loc = tick_locs[color_number] if all_int else color_number
key = cond_styles.pop('cmap_label', cond)
colorbar_ticks[key] = tick_loc
return styles, linestyles, colors, cmap, colorbar_title, colorbar_ticks
def _evoked_sensor_legend(info, picks, ymin, ymax, show_sensors, ax,
sphere):
"""Show sensor legend (location of a set of sensors on the head)."""
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
pos, outlines = _get_pos_outlines(info, picks, sphere=sphere)
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(pos, ["k"] * len(picks), ax, list(), outlines,
show_sensors, size=25)
def _draw_colorbar_pce(ax, colors, cmap, colorbar_title, colorbar_ticks):
"""Draw colorbar for plot_compare_evokeds."""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colorbar import ColorbarBase
from matplotlib.transforms import Bbox
# create colorbar axes
orig_bbox = ax.get_position()
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cax.yaxis.tick_right()
cb = ColorbarBase(cax, cmap=cmap, norm=None, orientation='vertical')
cb.set_label(colorbar_title)
# handle ticks
ticks = sorted(set(colorbar_ticks.values()))
ticklabels = [''] * len(ticks)
for label, tick in colorbar_ticks.items():
idx = ticks.index(tick)
if len(ticklabels[idx]): # handle labels with the same color/location
ticklabels[idx] = '\n'.join([ticklabels[idx], label])
else:
ticklabels[idx] = label
assert all(len(label) for label in ticklabels)
cb.set_ticks(ticks)
cb.set_ticklabels(ticklabels)
# shrink colorbar if discrete colors
color_vals = set(colors.values())
if all([isinstance(_color, Integral) for _color in color_vals]):
fig = ax.get_figure()
fig.canvas.draw()
fig_aspect = np.divide(*fig.get_size_inches())
new_bbox = ax.get_position()
cax_width = 0.75 * (orig_bbox.xmax - new_bbox.xmax)
# add extra space for multiline colorbar labels
h_mult = max(2, max([len(label.split('\n')) for label in ticklabels]))
cax_height = len(color_vals) * h_mult * cax_width / fig_aspect
x0 = orig_bbox.xmax - cax_width
y0 = (new_bbox.ymax + new_bbox.ymin - cax_height) / 2
x1 = orig_bbox.xmax
y1 = y0 + cax_height
new_bbox = Bbox([[x0, y0], [x1, y1]])
cax.set_axes_locator(None)
cax.set_position(new_bbox)
def _draw_legend_pce(legend, split_legend, styles, linestyles, colors, cmap,
do_topo, ax):
"""Draw legend for plot_compare_evokeds."""
import matplotlib.lines as mlines
lines = list()
# triage
if split_legend is None:
split_legend = cmap is not None
n_colors = len(set(colors.values()))
n_linestyles = len(set(linestyles.values()))
draw_styles = cmap is None and not split_legend
draw_colors = cmap is None and split_legend and n_colors > 1
draw_linestyles = (cmap is None or split_legend) and n_linestyles > 1
# create the fake lines for the legend
if draw_styles:
for label, cond_styles in styles.items():
line = mlines.Line2D([], [], label=label, **cond_styles)
lines.append(line)
else:
if draw_colors:
for label, color in colors.items():
line = mlines.Line2D([], [], label=label, linestyle='solid',
color=color)
lines.append(line)
if draw_linestyles:
for label, linestyle in linestyles.items():
line = mlines.Line2D([], [], label=label, linestyle=linestyle,
color='black')
lines.append(line)
# legend params
ncol = 1 + (len(lines) // 5)
loc = _check_loc_legal(legend, 'legend')
legend_params = dict(loc=loc, frameon=True, ncol=ncol)
# special placement (above dedicated legend axes) in topoplot
if do_topo and isinstance(legend, bool):
legend_params.update(loc='lower right', bbox_to_anchor=(1, 1))
# draw the legend
if any([draw_styles, draw_colors, draw_linestyles]):
labels = [line.get_label() for line in lines]
ax.legend(lines, labels, **legend_params)
def _draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis, invert_y,
vlines, tmin, tmax, unit, skip_axlabel=True):
"""Position, draw, and truncate axes for plot_compare_evokeds."""
# avoid matplotlib errors
if ymin == ymax:
ymax += 1e-15
if tmin == tmax:
tmax += 1e-9
ax.set_xlim(tmin, tmax)
# for dark backgrounds:
ax.patch.set_alpha(0)
if not np.isfinite([ymin, ymax]).all(): # nothing plotted
return
ax.set_ylim(ymin, ymax)
ybounds = (ymin, ymax)
# determine ymin/ymax for spine truncation
trunc_y = True if truncate_yaxis == 'auto' else truncate_yaxis
if truncate_yaxis:
if isinstance(truncate_yaxis, bool):
# truncate to half the max abs. value and round to a nice-ish
# number. ylims are already symmetric about 0 or have a lower bound
# of 0, so div. by 2 should suffice.
ybounds = np.array([ymin, ymax]) / 2.
precision = 0.25
ybounds = np.round(ybounds / precision) * precision
elif truncate_yaxis == 'auto':
# truncate to existing max/min ticks
ybounds = _trim_ticks(ax.get_yticks(), ymin, ymax)[[0, -1]]
else:
raise ValueError('"truncate_yaxis" must be bool or '
'"auto", got {}'.format(truncate_yaxis))
_setup_ax_spines(ax, vlines, tmin, tmax, ybounds[0], ybounds[1], invert_y,
unit, truncate_xaxis, trunc_y, skip_axlabel)
def _get_data_and_ci(evoked, combine, combine_func, picks, scaling=1,
ci_fun=None):
"""Compute (sensor-aggregated, scaled) time series and possibly CI."""
picks = np.array(picks).flatten()
# apply scalings
data = np.array([evk.data[picks] * scaling for evk in evoked])
# combine across sensors
if combine is not None:
logger.info('combining channels using "{}"'.format(combine))
data = combine_func(data)
# get confidence band
if ci_fun is not None:
ci = ci_fun(data)
# get grand mean across evokeds
data = np.mean(data, axis=0)
_check_if_nan(data)
return (data,) if ci_fun is None else (data, ci)
def _get_ci_function_pce(ci, do_topo=False):
"""Get confidence interval function for plot_compare_evokeds."""
if ci is None:
return None
elif callable(ci):
return ci
elif isinstance(ci, bool) and not ci:
return None
elif isinstance(ci, bool):
ci = 0.95
if isinstance(ci, float):
from ..stats import _ci
method = 'parametric' if do_topo else 'bootstrap'
return partial(_ci, ci=ci, method=method)
else:
raise TypeError('"ci" must be None, bool, float or callable, got {}'
.format(type(ci).__name__))
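# For example, _get_ci_function_pce(True) returns a partial of _ci with
# ci=0.95 and method='bootstrap' (or method='parametric' when do_topo=True),
# while _get_ci_function_pce(False) and _get_ci_function_pce(None) return None.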
def _plot_compare_evokeds(ax, data_dict, conditions, times, ci_dict, styles,
title, all_positive, topo):
"""Plot evokeds (to compare them; with CIs) based on a data_dict."""
for condition in conditions:
# plot the actual data ('dat') as a line
dat = data_dict[condition].T
ax.plot(times, dat, zorder=1000, label=condition, clip_on=False,
**styles[condition])
# plot the confidence interval if available
if ci_dict.get(condition, None) is not None:
ci_ = ci_dict[condition]
ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
zorder=9, color=styles[condition]['color'],
alpha=0.3, clip_on=False)
if topo:
ax.text(-.1, 1, title, transform=ax.transAxes)
else:
ax.set_title(title)
def _title_helper_pce(title, picked_types, picks, ch_names, combine):
"""Format title for plot_compare_evokeds."""
if title is None:
title = (_handle_default('titles').get(picks, None) if picked_types
else _set_title_multiple_electrodes(title, combine, ch_names))
# add the `combine` modifier
do_combine = picked_types or len(ch_names) > 1
if (title is not None and len(title) and isinstance(combine, str) and
do_combine):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += ' ({})'.format(_comb)
return title
@fill_doc
def plot_compare_evokeds(evokeds, picks=None, colors=None,
linestyles=None, styles=None, cmap=None,
vlines='auto', ci=True, truncate_yaxis='auto',
truncate_xaxis=True, ylim=None, invert_y=False,
show_sensors=None, legend=True,
split_legend=None, axes=None, title=None, show=True,
combine=None, sphere=None):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a list of Evokeds, the contents are plotted with their
``.comment`` attributes used as condition labels. If no comment is set,
        the index of the respective Evoked in the list will be used instead,
starting with ``1`` for the first Evoked.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
%(picks_all_data)s
* If picks is None or a (collection of) data channel types, the
global field power will be plotted for all data channels.
Otherwise, picks will be averaged.
* If multiple channel types are selected, one
figure will be returned for each channel type.
* If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
colors : list | dict | None
Colors to use when plotting the ERP/F lines and confidence bands. If
``cmap`` is not ``None``, ``colors`` must be a :class:`list` or
:class:`dict` of :class:`ints <int>` or :class:`floats <float>`
indicating steps or percentiles (respectively) along the colormap. If
``cmap`` is ``None``, list elements or dict values of ``colors`` must
be :class:`ints <int>` or valid :doc:`matplotlib colors
<tutorials/colors/colors>`; lists are cycled through sequentially,
while dicts must have keys matching the keys or conditions of an
``evokeds`` dict (see Notes for details). If ``None``, the current
:doc:`matplotlib color cycle <gallery/color/color_cycle_default>` is
used. Defaults to ``None``.
linestyles : list | dict | None
Styles to use when plotting the ERP/F lines. If a :class:`list` or
:class:`dict`, elements must be valid :doc:`matplotlib linestyles
<matplotlib:gallery/lines_bars_and_markers/linestyles>`. Lists are
cycled through sequentially; dictionaries must have keys matching the
keys or conditions of an ``evokeds`` dict (see Notes for details). If
``None``, all lines will be solid. Defaults to ``None``.
styles : dict | None
Dictionary of styles to use when plotting ERP/F lines. Keys must match
keys or conditions of ``evokeds``, and values must be a :class:`dict`
of legal inputs to :func:`matplotlib.pyplot.plot`. Those values will be
passed as parameters to the line plot call of the corresponding
condition, overriding defaults (e.g.,
``styles={"Aud/L": {"linewidth": 3}}`` will set the linewidth for
"Aud/L" to 3). As with ``colors`` and ``linestyles``, keys matching
conditions in ``/``-separated ``evokeds`` keys are supported (see Notes
for details).
cmap : None | str | tuple | instance of matplotlib.colors.Colormap
Colormap from which to draw color values when plotting the ERP/F lines
and confidence bands. If not ``None``, ints or floats in the ``colors``
parameter are mapped to steps or percentiles (respectively) along the
colormap. If ``cmap`` is a :class:`str`, it will be passed to
:func:`matplotlib.cm.get_cmap`; if ``cmap`` is a tuple, its first
element will be used as a string to label the colorbar, and its
second element will be passed to :func:`matplotlib.cm.get_cmap` (unless
it is already an instance of :class:`~matplotlib.colors.Colormap`).
.. versionchanged:: 0.19
Support for passing :class:`~matplotlib.colors.Colormap` instances.
vlines : "auto" | list of float
A list in seconds at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0. If an empty list is passed,
no vertical lines are plotted.
ci : float | bool | callable | None
Confidence band around each ERP/F time series. If ``False`` or ``None``
no confidence band is drawn. If :class:`float`, ``ci`` must be between
0 and 1, and will set the threshold for a bootstrap
(single plot)/parametric (when ``axes=='topo'``) estimation of the
confidence band; ``True`` is equivalent to setting a threshold of 0.95
(i.e., the 95%% confidence band is drawn). If a callable, it must take
a single array (n_observations × n_times) as input and return upper and
lower confidence margins (2 × n_times). Defaults to ``True``.
truncate_yaxis : bool | 'auto'
Whether to shorten the y-axis spine. If 'auto', the spine is truncated
at the minimum and maximum ticks. If ``True``, it is truncated at the
multiple of 0.25 nearest to half the maximum absolute value of the
data. If ``truncate_xaxis=False``, only the far bound of the y-axis
will be truncated. Defaults to 'auto'.
truncate_xaxis : bool
Whether to shorten the x-axis spine. If ``True``, the spine is
truncated at the minimum and maximum ticks. If
``truncate_yaxis=False``, only the far bound of the x-axis will be
truncated. Defaults to ``True``.
ylim : dict | None
Y-axis limits for plots (after scaling has been applied). :class:`dict`
keys should match channel types; valid keys are eeg, mag, grad, misc
(example: ``ylim=dict(eeg=[-20, 20])``). If ``None``, the y-axis limits
will be set automatically by matplotlib. Defaults to ``None``.
invert_y : bool
Whether to plot negative values upward (as is sometimes done
for ERPs out of tradition). Defaults to ``False``.
show_sensors : bool | int | str | None
Whether to display an inset showing sensor locations on a head outline.
If :class:`int` or :class:`str`, indicates position of the inset (see
:func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`). If ``None``,
treated as ``True`` if there is only one channel in ``picks``. If
``True``, location is upper or lower right corner, depending on data
values. Defaults to ``None``.
legend : bool | int | str
Whether to show a legend for the colors/linestyles of the conditions
plotted. If :class:`int` or :class:`str`, indicates position of the
legend (see :func:`mpl_toolkits.axes_grid1.inset_locator.inset_axes`).
If ``True``, equivalent to ``'upper left'``. Defaults to ``True``.
split_legend : bool | None
Whether to separate color and linestyle in the legend. If ``None``,
a separate linestyle legend will still be shown if ``cmap`` is
specified. Defaults to ``None``.
axes : None | Axes instance | list of Axes | 'topo'
:class:`~matplotlib.axes.Axes` object to plot into. If plotting
multiple channel types (or multiple channels when ``combine=None``),
``axes`` should be a list of appropriate length containing
:class:`~matplotlib.axes.Axes` objects. If ``'topo'``, a new
:class:`~matplotlib.figure.Figure` is created with one axis for each
channel, in a topographical layout. If ``None``, a new
:class:`~matplotlib.figure.Figure` is created for each channel type.
Defaults to ``None``.
title : str | None
Title printed above the plot. If ``None``, a title will be
automatically generated based on channel name(s) or type(s) and the
value of the ``combine`` parameter. Defaults to ``None``.
show : bool
Whether to show the figure. Defaults to ``True``.
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_evokeds, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``picks`` is a single channel (not channel type) or
``axes='topo'``, in which cases no combining is performed. Defaults to
``None``.
%(topomap_sphere_auto)s
Returns
-------
fig : list of Figure instances
A list of the figure(s) generated.
Notes
-----
If the parameters ``styles``, ``colors``, or ``linestyles`` are passed as
:class:`dicts <python:dict>`, then ``evokeds`` must also be a
:class:`python:dict`, and
the keys of the plot-style parameters must either match the keys of
``evokeds``, or match a ``/``-separated partial key ("condition") of
``evokeds``. For example, if evokeds has keys "Aud/L", "Aud/R", "Vis/L",
and "Vis/R", then ``linestyles=dict(L='--', R='-')`` will plot both Aud/L
and Vis/L conditions with dashed lines and both Aud/R and Vis/R conditions
with solid lines. Similarly, ``colors=dict(Aud='r', Vis='b')`` will plot
Aud/L and Aud/R conditions red and Vis/L and Vis/R conditions blue.
Color specification depends on whether a colormap has been provided in the
``cmap`` parameter. The following table summarizes how the ``colors``
parameter is interpreted:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+-------------+----------------+------------------------------------------+
| ``cmap`` | ``colors`` | result |
+=============+================+==========================================+
| | None | matplotlib default color cycle; unique |
| | | color for each condition |
| +----------------+------------------------------------------+
| | | matplotlib default color cycle; lowest |
| | list or dict | integer mapped to first cycle color; |
| | of integers | conditions with same integer get same |
| None | | color; unspecified conditions are "gray" |
| +----------------+------------------------------------------+
| | list or dict | ``ValueError`` |
| | of floats | |
| +----------------+------------------------------------------+
| | list or dict | the specified hex colors; unspecified |
| | of hexadecimal | conditions are "gray" |
| | color strings | |
+-------------+----------------+------------------------------------------+
| | None | equally spaced colors on the colormap; |
| | | unique color for each condition |
| +----------------+------------------------------------------+
| | | equally spaced colors on the colormap; |
| | list or dict | lowest integer mapped to first cycle |
| string or | of integers | color; conditions with same integer |
| instance of | | get same color |
| matplotlib +----------------+------------------------------------------+
| Colormap | list or dict | floats mapped to corresponding colormap |
| | of floats | values |
| +----------------+------------------------------------------+
| | list or dict | |
| | of hexadecimal | ``TypeError`` |
| | color strings | |
+-------------+----------------+------------------------------------------+
"""
import matplotlib.pyplot as plt
from ..evoked import Evoked, _check_evokeds_ch_names_times
# build up evokeds into a dict, if it's not already
if isinstance(evokeds, Evoked):
evokeds = [evokeds]
if isinstance(evokeds, (list, tuple)):
evokeds_copy = evokeds.copy()
evokeds = dict()
comments = [getattr(_evk, 'comment', None) for _evk in evokeds_copy]
for idx, (comment, _evoked) in enumerate(zip(comments, evokeds_copy)):
key = str(idx + 1)
if comment: # only update key if comment is non-empty
if comments.count(comment) == 1: # comment is unique
key = comment
else: # comment is non-unique: prepend index
key = f'{key}: {comment}'
evokeds[key] = _evoked
del evokeds_copy
if not isinstance(evokeds, dict):
raise TypeError('"evokeds" must be a dict, list, or instance of '
'mne.Evoked; got {}'.format(type(evokeds).__name__))
evokeds = deepcopy(evokeds) # avoid modifying dict outside function scope
for cond, evoked in evokeds.items():
_validate_type(cond, 'str', 'Conditions')
if isinstance(evoked, Evoked):
evokeds[cond] = [evoked] # wrap singleton evokeds in a list
for evk in evokeds[cond]:
_validate_type(evk, Evoked, 'All evokeds entries ', 'Evoked')
# ensure same channels and times across all evokeds
all_evoked = sum(evokeds.values(), [])
_check_evokeds_ch_names_times(all_evoked)
del all_evoked
# get some representative info
conditions = list(evokeds)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
sphere = _check_sphere(sphere, info)
tmin, tmax = times[0], times[-1]
# set some defaults
if ylim is None:
ylim = dict()
if vlines == 'auto':
vlines = [0.] if (tmin < 0 < tmax) else []
_validate_type(vlines, (list, tuple), 'vlines', 'list or tuple')
# is picks a channel type (or None)?
orig_picks = deepcopy(picks)
picks, picked_types = _picks_to_idx(info, picks, return_kind=True)
# some things that depend on picks:
ch_names = np.array(one_evoked.ch_names)[picks].tolist()
ch_types = list(_get_channel_types(info, picks=picks, unique=True)
.intersection(_DATA_CH_TYPES_SPLIT + ('misc',))) # miscICA
picks_by_type = channel_indices_by_type(info, picks)
# discard picks from non-data channels (e.g., ref_meg)
good_picks = sum([picks_by_type[ch_type] for ch_type in ch_types], [])
picks = np.intersect1d(picks, good_picks)
if show_sensors is None:
show_sensors = (len(picks) == 1)
# cannot combine a single channel
if (len(picks) < 2) and combine is not None:
warn('Only {} channel in "picks"; cannot combine by method "{}".'
.format(len(picks), combine))
# `combine` defaults to GFP unless picked a single channel or axes='topo'
if combine is None and len(picks) > 1 and axes != 'topo':
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# title
title = _title_helper_pce(title, picked_types, picks=orig_picks,
ch_names=ch_names, combine=combine)
# setup axes
do_topo = (axes == 'topo')
if do_topo:
show_sensors = False
if len(picks) > 70:
logger.info('You are plotting to a topographical layout with >70 '
'sensors. This can be extremely slow. Consider using '
'mne.viz.plot_topo, which is optimized for speed.')
axes = ['topo'] * len(ch_types)
else:
if axes is None:
axes = (plt.subplots(figsize=(8, 6))[1] for _ in ch_types)
elif isinstance(axes, plt.Axes):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
if len(ch_types) > 1:
logger.info('Multiple channel types selected, returning one figure '
'per type.')
figs = list()
for ch_type, ax in zip(ch_types, axes):
_picks = picks_by_type[ch_type]
_ch_names = np.array(one_evoked.ch_names)[_picks].tolist()
_picks = ch_type if picked_types else _picks
# don't pass `combine` here; title will run through this helper
# function a second time & it will get added then
_title = _title_helper_pce(title, picked_types, picks=_picks,
ch_names=_ch_names, combine=None)
figs.extend(plot_compare_evokeds(
evokeds, picks=_picks, colors=colors, cmap=cmap,
linestyles=linestyles, styles=styles, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, ylim=ylim, invert_y=invert_y,
legend=legend, show_sensors=show_sensors,
axes=ax, title=_title, split_legend=split_legend, show=show,
sphere=sphere))
return figs
# colors and colormap. This yields a `styles` dict with one entry per
# condition, specifying at least color and linestyle. THIS MUST BE DONE
# AFTER THE "MULTIPLE CHANNEL TYPES" LOOP
(_styles, _linestyles, _colors, _cmap, colorbar_title,
colorbar_ticks) = _handle_styles_pce(styles, linestyles, colors, cmap,
conditions)
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
# some things that depend on ch_type:
units = _handle_default('units')[ch_type]
scalings = _handle_default('scalings')[ch_type]
# prep for topo
pos_picks = picks # need this version of picks for sensor location inset
info = pick_info(info, sel=picks, copy=True)
all_ch_names = info['ch_names']
if not do_topo:
# add vacuous "index" (needed for topo) so same code works for both
axes = [(ax, 0) for ax in axes]
if np.array(picks).ndim < 2:
picks = [picks] # enables zipping w/ axes
else:
from .topo import iter_topography
fig = plt.figure(figsize=(18, 14))
def click_func(
ax_, pick_, evokeds=evokeds, colors=colors,
linestyles=linestyles, styles=styles, cmap=cmap, vlines=vlines,
ci=ci, truncate_yaxis=truncate_yaxis,
truncate_xaxis=truncate_xaxis, ylim=ylim, invert_y=invert_y,
show_sensors=show_sensors, legend=legend,
split_legend=split_legend, picks=picks, combine=combine):
plot_compare_evokeds(
evokeds=evokeds, colors=colors, linestyles=linestyles,
styles=styles, cmap=cmap, vlines=vlines, ci=ci,
truncate_yaxis=truncate_yaxis, truncate_xaxis=truncate_xaxis,
ylim=ylim, invert_y=invert_y, show_sensors=show_sensors,
legend=legend, split_legend=split_legend,
picks=picks[pick_], combine=combine, axes=ax_, show=True,
sphere=sphere)
layout = find_layout(info)
        # shift everything to the right and up by 15% of one axes width/height
layout.pos[:, 0] += layout.pos[0, 2] * .15
layout.pos[:, 1] += layout.pos[0, 3] * .15
# `axes` will be a list of (axis_object, channel_index) tuples
axes = list(iter_topography(
info, layout=layout, on_pick=click_func,
fig=fig, fig_facecolor='w', axis_facecolor='w',
axis_spinecolor='k', layout_scale=.925, legend=True))
picks = list(picks)
del info
# for each axis, compute the grand average and (maybe) the CI
# (per sensor if topo, otherwise aggregating over sensors)
c_func = None if do_topo else combine_func
all_data = list()
all_cis = list()
for _picks, (ax, idx) in zip(picks, axes):
data_dict = dict()
ci_dict = dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# skip CIs when possible; assign ci_fun first to get arg checking
ci_fun = _get_ci_function_pce(ci, do_topo=do_topo)
ci_fun = ci_fun if len(this_evokeds) > 1 else None
res = _get_data_and_ci(this_evokeds, combine, c_func, picks=_picks,
scaling=scalings, ci_fun=ci_fun)
data_dict[cond] = res[0]
if ci_fun is not None:
ci_dict[cond] = res[1]
all_data.append(data_dict) # grand means, or indiv. sensors if do_topo
all_cis.append(ci_dict)
del evokeds
# compute ylims
allvalues = list()
for _dict in all_data:
for _array in list(_dict.values()):
allvalues.append(_array[np.newaxis]) # to get same .ndim as CIs
for _dict in all_cis:
allvalues.extend(list(_dict.values()))
allvalues = np.concatenate(allvalues)
norm = np.all(allvalues > 0)
orig_ymin, orig_ymax = ylim.get(ch_type, [None, None])
ymin, ymax = _setup_vmin_vmax(allvalues, orig_ymin, orig_ymax, norm)
del allvalues
# add empty data and title for the legend axis
if do_topo:
all_data.append({cond: np.array([]) for cond in data_dict})
all_cis.append({cond: None for cond in ci_dict})
all_ch_names.append('')
# plot!
for (ax, idx), data, cis in zip(axes, all_data, all_cis):
if do_topo:
title = all_ch_names[idx]
# plot the data
_times = [] if idx == -1 else times
_plot_compare_evokeds(ax, data, conditions, _times, cis, _styles,
title, norm, do_topo)
# draw axes & vlines
skip_axlabel = do_topo and (idx != -1)
_draw_axes_pce(ax, ymin, ymax, truncate_yaxis, truncate_xaxis,
invert_y, vlines, tmin, tmax, units, skip_axlabel)
# add inset scalp plot showing location of sensors picked
if show_sensors:
_validate_type(show_sensors, (np.int64, bool, str, type(None)),
'show_sensors', 'numeric, str, None or bool')
if not _check_ch_locs(np.array(one_evoked.info['chs'])[pos_picks]):
warn('Cannot find channel coordinates in the supplied Evokeds. '
'Not showing channel locations.')
else:
_evoked_sensor_legend(one_evoked.info, pos_picks, ymin, ymax,
show_sensors, ax, sphere)
# add color/linestyle/colormap legend(s)
if legend:
_draw_legend_pce(legend, split_legend, _styles, _linestyles, _colors,
_cmap, do_topo, ax)
if cmap is not None:
_draw_colorbar_pce(ax, _colors, _cmap, colorbar_title, colorbar_ticks)
# finish
plt_show(show)
return [ax.figure]
| bsd-3-clause |
pizzathief/scipy | scipy/special/_precompute/lambertw.py | 8 | 2025 | """Compute a Pade approximation for the principal branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
import numpy as np
try:
import mpmath # type: ignore[import]
import matplotlib.pyplot as plt # type: ignore[import]
except ImportError:
pass
def lambertw_pade():
derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
p, q = mpmath.pade(derivs, 3, 2)
return p, q
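# Note (for orientation, not in the original file): the mpmath Pade
# coefficients returned above are ordered from the constant term upward;
# main() below reverses them and evaluates the rational approximation as
# np.polyval(p, z) / np.polyval(q, z).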
def main():
print(__doc__)
with mpmath.workdps(50):
p, q = lambertw_pade()
p, q = p[::-1], q[::-1]
print("p = {}".format(p))
print("q = {}".format(q))
x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
x, y = np.meshgrid(x, y)
z = x + 1j*y
lambertw_std = []
for z0 in z.flatten():
lambertw_std.append(complex(mpmath.lambertw(z0)))
lambertw_std = np.array(lambertw_std).reshape(x.shape)
fig, axes = plt.subplots(nrows=3, ncols=1)
# Compare Pade approximation to true result
p = np.array([float(p0) for p0 in p])
q = np.array([float(q0) for q0 in q])
pade_approx = np.polyval(p, z)/np.polyval(q, z)
pade_err = abs(pade_approx - lambertw_std)
axes[0].pcolormesh(x, y, pade_err)
# Compare two terms of asymptotic series to true result
asy_approx = np.log(z) - np.log(np.log(z))
asy_err = abs(asy_approx - lambertw_std)
axes[1].pcolormesh(x, y, asy_err)
# Compare two terms of the series around the branch point to the
# true result
p = np.sqrt(2*(np.exp(1)*z + 1))
series_approx = -1 + p - p**2/3
series_err = abs(series_approx - lambertw_std)
im = axes[2].pcolormesh(x, y, series_err)
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1)
pade_better = pade_err < asy_err
im = ax.pcolormesh(x, y, pade_better)
t = np.linspace(-0.3, 0.3)
ax.plot(-2.5*abs(t) - 0.2, t, 'r')
fig.colorbar(im, ax=ax)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
ltiao/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0, when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
Mega-DatA-Lab/mxnet | example/mxnet_adversarial_vae/vaegan_mxnet.py | 18 | 37473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on Jun 15, 2017
@author: shujon
'''
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
import logging
import cv2
from datetime import datetime
from PIL import Image
import os
import argparse
from scipy.io import savemat
#from layer import GaussianSampleLayer
######################################################################
#An adversarial variational autoencoder implementation in mxnet
# following the implementation at https://github.com/JeremyCCHsu/tf-vaegan
# of paper `Larsen, Anders Boesen Lindbo, et al. "Autoencoding beyond pixels using a
# learned similarity metric." arXiv preprint arXiv:1512.09300 (2015).`
######################################################################
#constant operator in mxnet, not used in this code
@mx.init.register
class MyConstant(mx.init.Initializer):
def __init__(self, value):
super(MyConstant, self).__init__(value=value)
self.value = value
def _init_weight(self, _, arr):
arr[:] = mx.nd.array(self.value)
#######################################################################
#The encoder is a CNN which takes a 32x32 image as input and
# generates the 100 dimensional shape embedding as a sample from a normal distribution
# using the predicted mean and variance
#######################################################################
def encoder(nef, z_dim, batch_size, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
#label = mx.sym.Variable('label')
e1 = mx.sym.Convolution(data, name='enc1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef, no_bias=no_bias)
ebn1 = BatchNorm(e1, name='encbn1', fix_gamma=fix_gamma, eps=eps)
eact1 = mx.sym.LeakyReLU(ebn1, name='encact1', act_type='leaky', slope=0.2)
e2 = mx.sym.Convolution(eact1, name='enc2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*2, no_bias=no_bias)
ebn2 = BatchNorm(e2, name='encbn2', fix_gamma=fix_gamma, eps=eps)
eact2 = mx.sym.LeakyReLU(ebn2, name='encact2', act_type='leaky', slope=0.2)
e3 = mx.sym.Convolution(eact2, name='enc3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*4, no_bias=no_bias)
ebn3 = BatchNorm(e3, name='encbn3', fix_gamma=fix_gamma, eps=eps)
eact3 = mx.sym.LeakyReLU(ebn3, name='encact3', act_type='leaky', slope=0.2)
e4 = mx.sym.Convolution(eact3, name='enc4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=nef*8, no_bias=no_bias)
ebn4 = BatchNorm(e4, name='encbn4', fix_gamma=fix_gamma, eps=eps)
eact4 = mx.sym.LeakyReLU(ebn4, name='encact4', act_type='leaky', slope=0.2)
eact4 = mx.sym.Flatten(eact4)
z_mu = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_mu")
z_lv = mx.sym.FullyConnected(eact4, num_hidden=z_dim, name="enc_lv")
#eps = mx.symbol.random_normal(loc=0, scale=1, shape=(batch_size,z_dim) )
#std = mx.symbol.sqrt(mx.symbol.exp(z_lv))
#z = mx.symbol.elemwise_add(z_mu, mx.symbol.broadcast_mul(eps, std))
z = z_mu + mx.symbol.broadcast_mul(mx.symbol.exp(0.5*z_lv),mx.symbol.random_normal(loc=0, scale=1,shape=(batch_size,z_dim)))
return z_mu, z_lv, z
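#######################################################################
# Illustrative sketch (not part of the training graph): the sampling step
# above is the standard reparameterization trick. The helper below restates
# it with plain NumPy so it can be checked in isolation; its name is
# hypothetical and nothing in this file calls it.
#######################################################################
def _reparameterize_example(z_mu_np, z_lv_np):
    '''Return z = mu + exp(0.5 * log_var) * eps, with eps drawn from N(0, 1).'''
    eps = np.random.normal(loc=0.0, scale=1.0, size=z_mu_np.shape)
    return z_mu_np + np.exp(0.5 * z_lv_np) * eps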
#######################################################################
#The generator is a CNN which takes the 100-dimensional embedding as input
# and reconstructs the input image given to the encoder
#######################################################################
def generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim=100, activation='sigmoid'):
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
rand = mx.sym.Reshape(rand, shape=(-1, z_dim, 1, 1))
#g1 = mx.sym.FullyConnected(rand, name="g1", num_hidden=2*2*ngf*8, no_bias=True)
g1 = mx.sym.Deconvolution(rand, name='gen1', kernel=(5,5), stride=(2,2),target_shape=(2,2), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='genbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name="genact1", act_type="relu")
# 4 x 4
#gact1 = mx.sym.Reshape(gact1, shape=(-1, ngf * 8, 2, 2))
#g1 = mx.sym.Deconvolution(g0, name='g1', kernel=(4,4), num_filter=ngf*8, no_bias=no_bias)
#gbn1 = BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=eps)
#gact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')
g2 = mx.sym.Deconvolution(gact1, name='gen2', kernel=(5,5), stride=(2,2),target_shape=(4,4), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='genbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='genact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='gen3', kernel=(5,5), stride=(2,2), target_shape=(8,8), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='genbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='genact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='gen4', kernel=(5,5), stride=(2,2), target_shape=(16,16), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='genbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='genact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='gen5', kernel=(5,5), stride=(2,2), target_shape=(32,32), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='genact5', act_type=activation)
return gout
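# Note on shapes (default 32x32 setup): the z vector is reshaped to
# (z_dim, 1, 1) and the five deconvolutions upsample it through
# 2x2 -> 4x4 -> 8x8 -> 16x16 -> 32x32, with nc output channels at the end.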
#######################################################################
# First part of the discriminator which takes a 32x32 image as input
# and outputs a convolutional feature map; this is required to calculate
# the layer loss
#######################################################################
def discriminator1(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
#label = mx.sym.Variable('label')
d1 = mx.sym.Convolution(data, name='d1', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
return dact3
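# Note on shapes: with 32x32 inputs, the three stride-2 convolutions reduce
# the spatial size 32 -> 16 -> 8 -> 4, so the returned feature map has shape
# (batch, ndf*4, 4, 4); this is the representation used for the layer loss.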
#######################################################################
# Second part of the discriminator, which takes the 256x4x4 feature map from
# discriminator1 as input (for 32x32 images with ndf=64) and generates the
# loss based on whether the input image was a real one or a fake one
#######################################################################
def discriminator2(ndf, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
d4 = mx.sym.Convolution(data, name='d4', kernel=(5,5), stride=(2,2), pad=(2,2), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
#d5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)
#d5 = mx.sym.Flatten(d5)
h = mx.sym.Flatten(dact4)
d5 = mx.sym.FullyConnected(h, num_hidden=1, name="d5")
#dloss = (0.5 * (label == 0) + (label != 0) ) * mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
return dloss
#######################################################################
# GaussianLogDensity loss calculation for layer wise loss
#######################################################################
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity', EPSILON = 1e-6):
    c = mx.sym.ones_like(log_var) * 2.0 * np.pi  # log(2*pi) term of the Gaussian log-density
c = mx.symbol.log(c)
var = mx.sym.exp(log_var)
x_mu2 = mx.symbol.square(x - mu) # [Issue] not sure the dim works or not?
x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
log_prob = -0.5 * (c + log_var + x_mu2_over_var)
#log_prob = (x_mu2)
log_prob = mx.symbol.sum(log_prob, axis=1, name=name) # keep_dims=True,
return log_prob
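#######################################################################
# Hedged NumPy reference for the symbolic density above (illustrative only,
# never called during training): per sample, the log-density is
# -0.5 * (log(2*pi) + log_var + (x - mu)**2 / var), summed over features.
#######################################################################
def _gaussian_log_density_np(x, mu, log_var, epsilon=1e-6):
    var = np.exp(log_var)
    log_prob = -0.5 * (np.log(2.0 * np.pi) + log_var + np.square(x - mu) / (var + epsilon))
    return np.sum(log_prob, axis=1)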
#######################################################################
# Calculate the discriminator layer loss
#######################################################################
def DiscriminatorLayerLoss():
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
data = mx.sym.Flatten(data)
label = mx.sym.Flatten(label)
label = mx.sym.BlockGrad(label)
zeros = mx.sym.zeros_like(data)
output = -GaussianLogDensity(label, data, zeros)
dloss = mx.symbol.MakeLoss(mx.symbol.mean(output),name='lloss')
#dloss = mx.sym.MAERegressionOutput(data=data, label=label, name='lloss')
return dloss
#######################################################################
# KLDivergence loss
#######################################################################
def KLDivergenceLoss():
data = mx.sym.Variable('data')
mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
mu2 = mx.sym.zeros_like(mu1)
lv2 = mx.sym.zeros_like(lv1)
v1 = mx.sym.exp(lv1)
v2 = mx.sym.exp(lv2)
mu_diff_sq = mx.sym.square(mu1 - mu2)
dimwise_kld = .5 * (
(lv2 - lv1) + mx.symbol.broadcast_div(v1, v2) + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
KL = mx.symbol.sum(dimwise_kld, axis=1)
KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL),name='KLloss')
return KLloss
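#######################################################################
# Illustrative NumPy check of the closed form above when the prior is N(0, I)
# (an assumption-labelled sketch, not used by the training loop):
# KL(N(mu, var) || N(0, 1)) = 0.5 * sum(var + mu^2 - 1 - log_var)
#######################################################################
def _kl_to_standard_normal_np(mu, log_var):
    return 0.5 * np.sum(np.exp(log_var) + np.square(mu) - 1.0 - log_var, axis=1)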
#######################################################################
# Get the dataset
#######################################################################
def get_data(path, activation):
#mnist = fetch_mldata('MNIST original')
#import ipdb; ipdb.set_trace()
data = []
image_names = []
#set the path to the 32x32 images of caltech101 dataset created using the convert_data_inverted.py script
#path = '/home/ubuntu/datasets/caltech101/data/images32x32/'
#path_wo_ext = '/home/ubuntu/datasets/caltech101/data/images/'
for filename in os.listdir(path):
img = cv2.imread(os.path.join(path,filename), cv2.IMREAD_GRAYSCALE)
image_names.append(filename)
if img is not None:
data.append(img)
data = np.asarray(data)
if activation == 'sigmoid':
#converting image values from 0 to 1 as the generator activation is sigmoid
data = data.astype(np.float32)/(255.0)
elif activation == 'tanh':
#converting image values from -1 to 1 as the generator activation is tanh
data = data.astype(np.float32)/(255.0/2) - 1.0
data = data.reshape((data.shape[0], 1, data.shape[1], data.shape[2]))
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(data.shape[0])
X = data[p]
return X, image_names
#######################################################################
# Create a random iterator for generator
#######################################################################
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
#######################################################################
# fill the ith grid of the buffer matrix with the values from the img
# buf : buffer matrix
# i : serial of the image in the 2D grid
# img : image data
# shape : ( height width depth ) of image
#######################################################################
def fill_buf(buf, i, img, shape):
    #n = buf.shape[0]/shape[1]
    # number of images that fit along one side of the grid (integer division
    # keeps the slice indices integral under Python 3)
    m = buf.shape[0]//shape[0]
    sx = (i%m)*shape[1]
    sy = (i//m)*shape[0]
    buf[sy:sy+shape[0], sx:sx+shape[1], :] = img
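# Worked example (hypothetical numbers): with a 256x256x1 buffer and 32x32x1
# tiles, m = 8 images fit per row, so image i = 9 lands at
# sx = (9 % 8) * 32 = 32 and sy = (9 // 8) * 32 = 32, i.e. the second tile
# of the second row.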
#######################################################################
# create a grid of images and save it as a final image
# title : grid image name
# X : array of images
#######################################################################
def visual(title, X, activation):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
if activation == 'sigmoid':
X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
elif activation == 'tanh':
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
#buff = cv2.cvtColor(buff, cv2.COLOR_BGR2RGB)
#local_out = 1
#num = 1
cv2.imwrite('%s.jpg' % (title), buff)
#######################################################################
# adversarial training of the VAE
#######################################################################
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight, output_path, checkpoint_path, data_path, activation,num_epoch, save_after_every, visualize_after_every, show_after_every):
#encoder
z_mu, z_lv, z = encoder(nef, Z, batch_size)
symE = mx.sym.Group([z_mu, z_lv, z])
#generator
symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
#discriminator
h = discriminator1(ndf)
dloss = discriminator2(ndf)
#symD = mx.sym.Group([dloss, h])
symD1 = h
symD2 = dloss
#symG, symD = make_dcgan_sym(nef, ngf, ndf, nc)
#mx.viz.plot_network(symG, shape={'rand': (batch_size, 100, 1, 1)}).view()
#mx.viz.plot_network(symD, shape={'data': (batch_size, nc, 64, 64)}).view()
# ==============data==============
#if dataset == 'caltech':
X_train, _ = get_data(data_path, activation)
#import ipdb; ipdb.set_trace()
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
#elif dataset == 'imagenet':
# train_iter = ImagenetIter(imgnet_path, batch_size, (3, 32, 32))
#print('=============================================', str(batch_size), str(Z))
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module E=============
modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
modE.bind(data_shapes=train_iter.provide_data)
modE.init_params(initializer=mx.init.Normal(0.02))
modE.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods = [modE]
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
#'rescale_grad': (1.0/batch_size)
})
mods.append(modG)
# =============module D=============
modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
modD = mx.mod.SequentialModule()
modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
#modD = mx.mod.Module(symbol=symD, data_names=('data',), label_names=('label',), context=ctx)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-3,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modD)
# =============module DL=============
symDL = DiscriminatorLayerLoss()
modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
    modDL.bind(data_shapes=[('data', (batch_size,nef * 4,4,4))],  # shape matches discriminator1's (nef*4, 4, 4) output; original note: "fix 512 here"
label_shapes=[('label', (batch_size,nef * 4,4,4))],
inputs_need_grad=True)
modDL.init_params(initializer=mx.init.Normal(0.02))
modDL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
# =============module KL=============
symKL = KLDivergenceLoss()
modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
modKL.bind(data_shapes=[('data', (batch_size*2,Z))],
inputs_need_grad=True)
modKL.init_params(initializer=mx.init.Normal(0.02))
modKL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modKL)
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
pass
# ============calculating prediction accuracy==============
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
# ============calculating binary cross-entropy loss==============
def fentropy(label, pred):
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
# ============calculating KL divergence loss==============
def kldivergence(label, pred):
#pred = pred.ravel()
#label = label.ravel()
mean, log_var = np.split(pred, 2, axis=0)
var = np.exp(log_var)
KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
KLLoss = KLLoss / nElements
return KLLoss
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mE = mx.metric.CustomMetric(kldivergence)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(num_epoch):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
#print('======================================================================')
#print(outG)
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
#modD.update()
gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
#update discriminator on decoded
modE.forward(batch, is_train=True)
mu, lv, z = modE.get_outputs()
#z = GaussianSampleLayer(mu, lv)
z = z.reshape((batch_size, Z, 1, 1))
sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 0
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
#modD.update()
gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
lx = [out.copyto(out.context) for out in modD1.get_outputs()]
modD.backward()
for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update generator twice as the discriminator is too strong
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 1 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
#modG.update()
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
#modG.update()
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)  # in-place so the combined gradient is used by modG.update()
modG.update()
mG.update([label], modD.get_outputs())
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 2 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
#modG.update()
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
#modG.update()
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)  # in-place so the combined gradient is used by modG.update()
modG.update()
mG.update([label], modD.get_outputs())
##update encoder--------------------------------------------------
#modE.forward(batch, is_train=True)
#mu, lv, z = modE.get_outputs()
#z = z.reshape((batch_size, Z, 1, 1))
#sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
#update generator
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
DLloss = modDL.get_outputs()
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
#modG.update()
#print('updating encoder=====================================')
#update encoder
nElements = batch_size
#var = mx.ndarray.exp(lv)
modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu,lv, dim=0)]), is_train=True)
KLloss = modKL.get_outputs()
modKL.backward()
gradKLLoss = modKL.get_input_grads()
diffG = modG.get_input_grads()
#print('======================================================================')
#print(np.sum(diffG[0].asnumpy()))
diffG = diffG[0].reshape((batch_size, Z))
modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
modE.update()
#print('mu type : ')
#print(type(mu))
pred = mx.ndarray.concat(mu,lv, dim=0)
#print(pred)
mE.update([pred], [pred])
if mon is not None:
mon.toc_print()
t += 1
if t % show_after_every == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(), KLloss[0].asnumpy(), DLloss[0].asnumpy())
mACC.reset()
mG.reset()
mD.reset()
mE.reset()
if epoch % visualize_after_every == 0:
visual(output_path +'gout'+str(epoch), outG[0].asnumpy(), activation)
#diff = diffD[0].asnumpy()
#diff = (diff - diff.mean())/diff.std()
#visual('diff', diff)
visual(output_path + 'data'+str(epoch), batch.data[0].asnumpy(), activation)
if check_point and epoch % save_after_every == 0:
print('Saving...')
modG.save_params(checkpoint_path + '/%s_G-%04d.params'%(dataset, epoch))
modD.save_params(checkpoint_path + '/%s_D-%04d.params'%(dataset, epoch))
modE.save_params(checkpoint_path + '/%s_E-%04d.params'%(dataset, epoch))
#######################################################################
# Test the VAE with a pretrained encoder and generator.
# Keep the batch size 1
#######################################################################
def test(nef, ngf, nc, batch_size, Z, ctx, pretrained_encoder_path, pretrained_generator_path, output_path, data_path, activation, save_embedding, embedding_path = ''):
#encoder
z_mu, z_lv, z = encoder(nef, Z, batch_size)
symE = mx.sym.Group([z_mu, z_lv, z])
#generator
symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
#symG, symD = make_dcgan_sym(nef, ngf, ndf, nc)
#mx.viz.plot_network(symG, shape={'rand': (batch_size, 100, 1, 1)}).view()
#mx.viz.plot_network(symD, shape={'data': (batch_size, nc, 64, 64)}).view()
# ==============data==============
X_test, image_names = get_data(data_path, activation)
#import ipdb; ipdb.set_trace()
test_iter = mx.io.NDArrayIter(X_test, batch_size=batch_size, shuffle=False)
# =============module E=============
modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
modE.bind(data_shapes=test_iter.provide_data)
#modE.init_params(initializer=mx.init.Normal(0.02))
modE.load_params(pretrained_encoder_path)
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=[('rand', (1, Z, 1, 1))])
#modG.init_params(initializer=mx.init.Normal(0.02))
modG.load_params(pretrained_generator_path)
print('Testing...')
# =============test===============
test_iter.reset()
for t, batch in enumerate(test_iter):
#update discriminator on decoded
modE.forward(batch, is_train=False)
mu, lv, z = modE.get_outputs()
#z = GaussianSampleLayer(mu, lv)
mu = mu.reshape((batch_size, Z, 1, 1))
sample = mx.io.DataBatch([mu], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
modG.forward(sample, is_train=False)
outG = modG.get_outputs()
visual(output_path + '/' + 'gout'+str(t), outG[0].asnumpy(), activation)
visual(output_path + '/' + 'data'+str(t), batch.data[0].asnumpy(), activation)
image_name = image_names[t].split('.')[0]
if save_embedding:
savemat(embedding_path+'/'+image_name+'.mat', {'embedding':mu.asnumpy()})
def parse_args():
    parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variational Autoencoder')
parser.add_argument('--train', help='train the network', action='store_true')
parser.add_argument('--test', help='test the network', action='store_true')
parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true')
parser.add_argument('--dataset', help='dataset name', default='caltech', type=str)
parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str)
parser.add_argument('--training_data_path', help='training data path', default='/home/ubuntu/datasets/caltech101/data/images32x32/', type=str)
parser.add_argument('--testing_data_path', help='testing data path', default='/home/ubuntu/datasets/MPEG7dataset/images/', type=str)
parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str)
parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str)
parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid/', type=str)
parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid/', type=str)
parser.add_argument('--checkpoint_path', help='checkpoint saving path ', default='checkpoints32x32_sigmoid/', type=str)
parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int)
parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int)
parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int)
parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int)
parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int)
parser.add_argument('--Z', help='embedding size', default=100, type=int)
parser.add_argument('--lr', help='learning rate', default=0.0002, type=float)
parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float)
parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float)
parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float)
parser.add_argument('--gpu', help='gpu index', default=0, type=int)
parser.add_argument('--num_epoch', help='number of maximum epochs ', default=45, type=int)
parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs ', default=5, type=int)
parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int)
parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int)
args = parser.parse_args()
return args
def main():
args = parse_args()
# gpu context
ctx = mx.gpu(args.gpu)
# checkpoint saving flags
check_point = True
if args.train:
train(args.dataset, args.nef, args.ndf, args.ngf, args.nc, args.batch_size, args.Z, args.lr, args.beta1, args.epsilon, ctx, check_point, args.g_dl_weight, args.output_path, args.checkpoint_path, args.training_data_path, args.activation, args.num_epoch, args.save_after_every, args.visualize_after_every, args.show_after_every)
if args.test:
test(args.nef, args.ngf, args.nc, 1, args.Z, ctx, args.pretrained_encoder_path, args.pretrained_generator_path, args.output_path, args.testing_data_path, args.activation, args.save_embedding, args.embedding_path)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
| apache-2.0 |
jhpyle/docassemble | docassemble_webapp/docassemble/webapp/machinelearning.py | 1 | 35497 | from docassemble.webapp.core.models import MachineLearning
from docassemble.base.core import DAObject, DAList, DADict
from docassemble.webapp.db_object import db
from sqlalchemy import or_, and_, select, delete
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
import re
import random
import codecs
import pickle
import datetime
import os
import yaml
import json
import sys
from docassemble_pattern.vector import count, KNN, SVM, stem, PORTER, words, Document
from docassemble.base.logger import logmessage
from docassemble.webapp.backend import get_info_from_file_reference
from docassemble.webapp.fixpickle import fix_pickle_obj
import docassemble.base.functions
learners = dict()
svms = dict()
lastmodtime = dict()
reset_counter = dict()
class MachineLearningEntry(DAObject):
"""An entry in the machine learning system"""
def classify(self, dependent=None):
"""Sets the dependent variable of the machine learning entry"""
if dependent is not None:
self.dependent = dependent
self.ml.set_dependent_by_id(self.id, self.dependent)
return self
def save(self):
"""Saves the entry to the data set. The independent variable must be
defined in order to save."""
args = dict(independent=self.independent)
if hasattr(self, 'dependent'):
args['dependent'] = self.dependent
if hasattr(self, 'key'):
args['key'] = self.key
if hasattr(self, 'id'):
args['id'] = self.id
if hasattr(self, 'info') and self.info is not None:
args['info'] = self.info
self.ml._save_entry(**args)
return self
def predict(self, probabilities=False):
"""Returns predictions for this entry's independent variable."""
return self.ml.predict(self.independent, probabilities=probabilities)
class MachineLearner(object):
"""Base class for machine learning objects"""
def __init__(self, *pargs, **kwargs):
if len(pargs) > 0:
if ':' in pargs[0]:
raise Exception("MachineLearner: you cannot use a colon in a machine learning name")
question = docassemble.base.functions.get_current_question()
if question is not None:
self.group_id = question.interview.get_ml_store() + ':' + pargs[0]
else:
self.group_id = pargs[0]
if len(pargs) > 1:
self.initial_file = pargs[1]
if 'group_id' in kwargs:
self.group_id = kwargs['group_id']
if 'initial_file' in kwargs:
self.initial_file = kwargs['initial_file']
if kwargs.get('use_initial_file', False):
question = docassemble.base.functions.get_current_question()
if question is not None:
self.initial_file = question.interview.get_ml_store()
self.reset_counter = 0
def reset(self):
self.reset_counter += 1
def _initialize(self, reset=False):
if hasattr(self, 'initial_file'):
self.start_from_file(self.initial_file)
if hasattr(self, 'group_id') and (self.group_id not in lastmodtime or reset):
lastmodtime[self.group_id] = datetime.datetime(year=1970, month=1, day=1)
        if hasattr(self, 'group_id'):
            reset_counter[self.group_id] = self.reset_counter
def export_training_set(self, output_format='json', key=None):
self._initialize()
output = list()
for entry in self.classified_entries(key=key):
the_entry = dict(independent=entry.independent, dependent=entry.dependent)
if entry.info is not None:
the_entry['info'] = entry.info
output.append(the_entry)
if output_format == 'json':
return json.dumps(output, sort_keys=True, indent=4)
elif output_format == 'yaml':
return yaml.safe_dump(output, default_flow_style=False)
else:
raise Exception("Unknown output format " + str(output_format))
def dependent_in_use(self, key=None):
in_use = set()
if key is None:
query = db.session.execute(select(MachineLearning.dependent).where(MachineLearning.group_id == self.group_id).group_by(MachineLearning.dependent))
else:
query = db.session.execute(select(MachineLearning.dependent).where(and_(MachineLearning.group_id == self.group_id, MachineLearning.key == key)).group_by(MachineLearning.dependent))
for record in query:
if record.dependent is not None:
in_use.add(fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
return sorted(in_use)
def is_empty(self):
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id)).first()
if existing_entry is None:
return True
return False
def start_from_file(self, fileref):
#logmessage("Starting from file " + str(fileref))
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id)).first()
if existing_entry is not None:
return
file_info = get_info_from_file_reference(fileref, folder='sources')
if 'fullpath' not in file_info or file_info['fullpath'] is None or not os.path.exists(file_info['fullpath']):
return
#raise Exception("File reference " + str(fileref) + " is invalid")
with open(file_info['fullpath'], 'r', encoding='utf-8') as fp:
content = fp.read()
if 'mimetype' in file_info and file_info['mimetype'] == 'application/json':
aref = json.loads(content)
elif 'extension' in file_info and file_info['extension'].lower() in ['yaml', 'yml']:
aref = yaml.load(content, Loader=yaml.FullLoader)
if type(aref) is dict and hasattr(self, 'group_id'):
the_group_id = re.sub(r'.*:', '', self.group_id)
if the_group_id in aref:
aref = aref[the_group_id]
if type(aref) is list:
nowtime = datetime.datetime.utcnow()
for entry in aref:
if 'independent' in entry:
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(entry['independent']), 'base64').decode(), dependent=codecs.encode(pickle.dumps(entry.get('dependent', None)), 'base64').decode(), modtime=nowtime, create_time=nowtime, active=True, key=entry.get('key', None), info=codecs.encode(pickle.dumps(entry['info']), 'base64').decode() if entry.get('info', None) is not None else None)
db.session.add(new_entry)
db.session.commit()
def add_to_training_set(self, independent, dependent, key=None, info=None):
self._initialize()
nowtime = datetime.datetime.utcnow()
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(independent), 'base64').decode(), dependent=codecs.encode(pickle.dumps(dependent), 'base64').decode(), info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None, create_time=nowtime, modtime=nowtime, active=True, key=key)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def save_for_classification(self, indep, key=None, info=None):
self._initialize()
if key is None:
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, dependent=None, independent=codecs.encode(pickle.dumps(indep), 'base64').decode())).scalar()
else:
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, key=key, independent=codecs.encode(pickle.dumps(indep), 'base64').decode())).scalar()
if existing_entry is not None:
#logmessage("entry is already there")
return existing_entry.id
new_entry = MachineLearning(group_id=self.group_id, independent=codecs.encode(pickle.dumps(indep), 'base64').decode(), create_time=datetime.datetime.utcnow(), active=False, key=key, info=codecs.encode(pickle.dumps(info), 'base64').decode() if info is not None else None)
db.session.add(new_entry)
db.session.commit()
return new_entry.id
def retrieve_by_id(self, the_id):
self._initialize()
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, id=the_id)).scalar()
if existing_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if existing_entry.dependent:
dependent = fix_pickle_obj(codecs.decode(bytearray(existing_entry.dependent, encoding='utf-8'), 'base64'))
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), dependent=dependent, create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
else:
return MachineLearningEntry(ml=self, id=existing_entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(existing_entry.independent, encoding='utf-8'), 'base64')), create_time=existing_entry.create_time, key=existing_entry.key, info=fix_pickle_obj(codecs.decode(bytearray(existing_entry.info, encoding='utf-8'), 'base64')) if existing_entry.info is not None else None)
def one_unclassified_entry(self, key=None):
self._initialize()
if key is None:
entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id)).scalar()
else:
entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id)).scalar()
if entry is None:
return None
return MachineLearningEntry(ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)._set_instance_name_for_method()
def new_entry(self, **kwargs):
return MachineLearningEntry(ml=self, **kwargs)._set_instance_name_for_method()
def unclassified_entries(self, key=None):
self._initialize()
results = DAList()._set_instance_name_for_method()
results.gathered = True
if key is None:
query = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, active=False).order_by(MachineLearning.id)).scalars()
else:
query = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, key=key, active=False).order_by(MachineLearning.id)).scalars()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), create_time=entry.create_time, key=entry.key, info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None)
return results
def classified_entries(self, key=None):
self._initialize()
results = DAList()
results.gathered = True
results.set_random_instance_name()
if key is None:
query = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, active=True).order_by(MachineLearning.id)).scalars()
else:
query = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, active=True, key=key).order_by(MachineLearning.id)).scalars()
for entry in query:
results.appendObject(MachineLearningEntry, ml=self, id=entry.id, independent=fix_pickle_obj(codecs.decode(bytearray(entry.independent, encoding='utf-8'), 'base64')), dependent=fix_pickle_obj(codecs.decode(bytearray(entry.dependent, encoding='utf-8'), 'base64')), info=fix_pickle_obj(codecs.decode(bytearray(entry.info, encoding='utf-8'), 'base64')) if entry.info is not None else None, create_time=entry.create_time, key=entry.key)
return results
def _save_entry(self, **kwargs):
self._initialize()
the_id = kwargs.get('id', None)
need_to_reset = False
if the_id is None:
the_entry = MachineLearning(group_id=self.group_id)
existing = False
else:
the_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, id=the_id)).scalar()
existing = True
if the_entry is None:
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
if 'dependent' in kwargs:
if existing and the_entry.dependent is not None and the_entry.dependent != kwargs['dependent']:
need_to_reset = True
the_entry.dependent = codecs.encode(pickle.dumps(kwargs['dependent']), 'base64').decode()
the_entry.active = True
if 'independent' in kwargs:
if existing and the_entry.independent is not None and the_entry.independent != kwargs['independent']:
need_to_reset = True
the_entry.independent = codecs.encode(pickle.dumps(kwargs['independent']), 'base64').decode()
if 'key' in kwargs:
the_entry.key = kwargs['key']
if 'info' in kwargs:
the_entry.info = codecs.encode(pickle.dumps(kwargs['info']), 'base64').decode()
the_entry.modtime = datetime.datetime.utcnow()
if not existing:
db.session.add(the_entry)
db.session.commit()
if need_to_reset:
self.reset()
def set_dependent_by_id(self, the_id, the_dependent):
self._initialize()
existing_entry = db.session.execute(select(MachineLearning).filter_by(group_id=self.group_id, id=the_id).with_for_update()).scalar()
if existing_entry is None:
db.session.commit()
raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
existing_entry.dependent = codecs.encode(pickle.dumps(the_dependent), 'base64').decode()
existing_entry.modtime = datetime.datetime.utcnow()
existing_entry.active = True
db.session.commit()
def delete_by_id(self, the_id):
self._initialize()
db.session.execute(delete(MachineLearning).filter_by(group_id=self.group_id, id=the_id))
db.session.commit()
self.reset()
def delete_by_key(self, key):
self._initialize()
db.session.execute(delete(MachineLearning).filter_by(group_id=self.group_id, key=key))
db.session.commit()
self.reset()
def save(self):
db.session.commit()
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
for record in db.session.execute(select(MachineLearning.independent, MachineLearning.dependent).where(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id]))).all():
#logmessage("Training...")
self._train(fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64')), fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64')))
success = True
lastmodtime[self.group_id] = nowtime
return success
def delete_training_set(self):
self._initialize()
db.session.execute(delete(MachineLearning).filter_by(group_id=self.group_id))
db.session.commit()
def _train(self, indep, depend):
pass
def _predict(self, indep):
pass
class SimpleTextMachineLearner(MachineLearner):
"""A class used to interact with the machine learning system, using the K Nearest Neighbors method"""
def _learner(self):
return KNN()
def _initialize(self):
"""Initializes a fresh machine learner."""
        need_to_reset = False
        if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:
            need_to_reset = True
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = self._learner()
return super()._initialize(reset=need_to_reset)
def _train(self, indep, depend):
"""Trains the machine learner given an independent variable and a corresponding dependent variable."""
if indep is None:
return
the_text = re.sub(r'[\n\r]+', r' ', indep).lower()
learners[self.group_id].train(Document(the_text.lower(), stemmer=PORTER), depend)
def predict(self, indep, probabilities=False):
"""Returns a list of predicted dependent variables for a given independent variable."""
indep = re.sub(r'[\n\r]+', r' ', indep).lower()
if not self._train_from_db():
return list()
probs = dict()
for key, value in learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER), discrete=False).items():
probs[key] = value
if not len(probs):
single_result = learners[self.group_id].classify(Document(indep.lower(), stemmer=PORTER))
if single_result is not None:
probs[single_result] = 1.0
if probabilities:
return [(x, probs[x]) for x in sorted(probs.keys(), key=probs.get, reverse=True)]
else:
return sorted(probs.keys(), key=probs.get, reverse=True)
def confusion_matrix(self, key=None, output_format=None, split=False):
"""Returns a confusion matrix for the model based on splitting the data set randomly into two pieces, training on one and testing on the other"""
if split:
list_of_dependent = self.dependent_in_use(key=key)
else:
list_of_dependent = [None]
output = ''
matrices = dict()
for current_dep in list_of_dependent:
testing_set = list()
model = self._learner()
for record in self.classified_entries(key=key):
if split:
dep_result = str(record.dependent == current_dep)
else:
dep_result = record.dependent
if random.random() < 0.5:
model.train(Document(record.independent.lower(), stemmer=PORTER), dep_result)
else:
testing_set.append((Document(record.independent.lower(), stemmer=PORTER), dep_result))
matrix = model.confusion_matrix(documents=testing_set)
matrices[current_dep] = matrix
if output_format == 'html':
if split:
output += '<h4>' + current_dep + "</h4>"
vals = matrix.keys()
output += '<table class="table table-bordered"><thead><tr><td></td><td></td><td style="text-align: center" colspan="' + str(len(vals)) + '">Actual</td></tr><tr><th></th><th></th>'
first = True
for val in vals:
output += '<th>' + val + '</th>'
output += '</tr></thead><tbody>'
for val_a in vals:
output += '<tr>'
if first:
output += '<td style="text-align: right; vertical-align: middle;" rowspan="' + str(len(vals)) + '">Predicted</td>'
first = False
output += '<th>' + val_a + '</th>'
for val_b in vals:
output += '<td>' + str(matrix[val_b].get(val_a, 0)) + '</td>'
output += '</tr>'
output += '</tbody></table>'
#output += "\n\n`" + str(matrix) + "`"
# output += '<ul>'
# for document, actual in testing_set:
# predicted = model.classify(document)
# output += '<li>Predicted: ' + predicted + '; Actual: ' + actual + '</li>'
# output += '</ul>'
if output_format == 'html':
return output
if split:
ret_val = matrices
else:
ret_val = matrices[None]
if output_format == 'json':
return json.dumps(ret_val, sort_keys=True, indent=4)
if output_format == 'yaml':
return yaml.safe_dump(ret_val, default_flow_style=False)
if output_format is None:
return ret_val
return ret_val
def reset(self):
"""Clears the cache of the machine learner"""
return super().reset()
def delete_training_set(self):
"""Deletes all of the training data in the database"""
return super().delete_training_set()
def delete_by_key(self, key):
"""Deletes all of the training data in the database that was added with a given key"""
        return super().delete_by_key(key)
def delete_by_id(self, the_id):
"""Deletes the entry in the training data with the given ID"""
return super().delete_by_id(the_id)
def set_dependent_by_id(self, the_id, depend):
"""Sets the dependent variable for the entry in the training data with the given ID"""
return super().set_dependent_by_id(the_id, depend)
def classified_entries(self, key=None):
"""Returns a list of entries in the data that have been classified."""
return super().classified_entries(key=key)
def unclassified_entries(self, key=None):
"""Returns a list of entries in the data that have not yet been classified."""
return super().unclassified_entries(key=key)
def one_unclassified_entry(self, key=None):
"""Returns the first entry in the data that has not yet been classified, or None if all entries have been classified."""
return super().one_unclassified_entry(key=key)
def retrieve_by_id(self, the_id):
"""Returns the entry in the data that has the given ID."""
return super().retrieve_by_id(the_id)
def save_for_classification(self, indep, key=None, info=None):
"""Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry."""
return super().save_for_classification(indep, key=key, info=info)
def add_to_training_set(self, indep, depend, key=None, info=None):
"""Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry."""
return super().add_to_training_set(indep, depend, key=key, info=info)
def is_empty(self):
"""Returns True if no data have been defined, otherwise returns False."""
return super().is_empty()
def dependent_in_use(self, key=None):
"""Returns a sorted list of unique dependent variables in the data."""
return super().dependent_in_use(key=key)
def export_training_set(self, output_format='json'):
"""Returns the classified entries in the data as JSON or YAML."""
return super().export_training_set(output_format=output_format)
def new_entry(self, **kwargs):
"""Creates a new entry in the data."""
return super().new_entry(**kwargs)
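# Illustrative usage sketch (hypothetical group name and strings; assumes an
# interview/request context so the database-backed calls can run):
#
#   ml = SimpleTextMachineLearner('demo-fruit')
#   ml.add_to_training_set('it is yellow and curved', 'banana')
#   ml.add_to_training_set('it is red and round', 'apple')
#   ml.predict('small, round and red')                      # e.g. ['apple', 'banana']
#   ml.predict('small, round and red', probabilities=True)  # e.g. [('apple', 0.7), ...]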
class SVMMachineLearner(SimpleTextMachineLearner):
    """Machine Learning object using the Support Vector Machine method"""
def _learner(self):
return SVM(extension='libsvm')
class RandomForestMachineLearner(MachineLearner):
def _learner(self):
return RandomForestClassifier()
def feature_importances(self):
"""Returns the importances of each of the features"""
if not self._train_from_db():
return list()
return learners[self.group_id]['learner'].feature_importances_
def _initialize(self):
"""Initializes a fresh machine learner."""
        need_to_reset = False
        if self.group_id not in reset_counter or self.reset_counter != reset_counter[self.group_id]:
            need_to_reset = True
if hasattr(self, 'group_id') and (self.group_id not in learners or need_to_reset):
learners[self.group_id] = dict(learner=self._learner(), dep_type=None, indep_type=dict(), indep_categories=dict(), dep_categories=None)
return super()._initialize(reset=need_to_reset)
def _train_from_db(self):
#logmessage("Doing train_from_db")
self._initialize()
nowtime = datetime.datetime.utcnow()
success = False
data = list()
depend_data = list()
for record in db.session.execute(select(MachineLearning).where(and_(MachineLearning.group_id == self.group_id, MachineLearning.active == True, MachineLearning.modtime > lastmodtime[self.group_id]))).scalars().all():
indep_var = fix_pickle_obj(codecs.decode(bytearray(record.independent, encoding='utf-8'), 'base64'))
depend_var = fix_pickle_obj(codecs.decode(bytearray(record.dependent, encoding='utf-8'), 'base64'))
if type(depend_var) is str:
depend_var = str(depend_var)
if learners[self.group_id]['dep_type'] is not None:
if type(depend_var) is not learners[self.group_id]['dep_type']:
if type(depend_var) is int and learners[self.group_id]['dep_type'] is float:
depend_var = float(depend_var)
elif type(depend_var) is float and learners[self.group_id]['dep_type'] is int:
learners[self.group_id]['dep_type'] = float
else:
raise Exception("RandomForestMachineLearner: dependent variable type was not consistent")
else:
if not isinstance(depend_var, (str, int, bool, float)):
                    raise Exception("RandomForestMachineLearner: dependent variable type was not a standard variable type")
learners[self.group_id]['dep_type'] = type(depend_var)
depend_data.append(depend_var)
if isinstance(indep_var, DADict):
indep_var = indep_var.elements
if type(indep_var) is not dict:
raise Exception("RandomForestMachineLearner: independent variable was not a dictionary")
for key, val in indep_var.items():
if type(val) is str:
val = str(val)
if key in learners[self.group_id]['indep_type']:
if type(val) is not learners[self.group_id]['indep_type'][key]:
if type(val) is int and learners[self.group_id]['indep_type'][key] is float:
val = float(val)
elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:
learners[self.group_id]['indep_type'][key] = float
else:
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not consistent")
else:
if not isinstance(val, (str, int, bool, float)):
raise Exception("RandomForestMachineLearner: independent variable type for key " + repr(key) + " was not a standard variable type")
learners[self.group_id]['indep_type'][key] = type(val)
data.append(indep_var)
success = True
if success:
df = pd.DataFrame(data)
for key, val in learners[self.group_id]['indep_type'].items():
if val is str:
df[key] = pd.Series(df[key], dtype="category")
learners[self.group_id]['indep_categories'][key] = df[key].cat.categories
df = pd.get_dummies(df, dummy_na=True)
if learners[self.group_id]['dep_type'] is str:
y = pd.Series(depend_data, dtype="category")
learners[self.group_id]['dep_categories'] = y.cat.categories
else:
y = pd.Series(depend_data)
learners[self.group_id]['learner'].fit(df, list(y))
lastmodtime[self.group_id] = nowtime
return success
def predict(self, indep, probabilities=False):
"""Returns a list of predicted dependent variables for a given independent variable."""
if not self._train_from_db():
return list()
if isinstance(indep, DADict):
indep = indep.elements
if type(indep) is not dict:
raise Exception("RandomForestMachineLearner: independent variable was not a dictionary")
indep = process_independent_data(indep)
indep_to_use = dict()
for key, val in indep.items():
if key in learners[self.group_id]['indep_type']:
if type(val) is str:
val = str(val)
if type(val) is not learners[self.group_id]['indep_type'][key]:
if type(val) is int and learners[self.group_id]['indep_type'][key] is float:
val = float(val)
elif type(val) is float and learners[self.group_id]['indep_type'][key] is int:
learners[self.group_id]['indep_type'][key] = float
else:
raise Exception("RandomForestMachineLearner: the independent variable type for key " + repr(key) + " was not consistent. Stored was " + str(learners[self.group_id]['indep_type'][key]) + " and type was " + str(type(val)))
else:
raise Exception("RandomForestMachineLearner: independent variable key " + repr(key) + " was not recognized")
if isinstance(val, str):
if val not in learners[self.group_id]['indep_categories'][key]:
val = np.nan
indep_to_use[key] = val
df = pd.DataFrame([indep_to_use])
for key, val in indep_to_use.items():
if learners[self.group_id]['indep_type'][key] is str:
#df[key] = pd.Series(df[key]).astype('category', categories=learners[self.group_id]['indep_categories'][key])
df[key] = pd.Series(df[key]).astype(CategoricalDtype(learners[self.group_id]['indep_categories'][key]))
df = pd.get_dummies(df, dummy_na=True)
pred = learners[self.group_id]['learner'].predict_proba(df)
indexno = 0
result = list()
for x in pred[0]:
result.append((learners[self.group_id]['dep_categories'][indexno], x))
indexno += 1
result = sorted(result, key=lambda x: x[1], reverse=True)
if probabilities:
return result
return [x[0] for x in result]
def reset(self):
"""Clears the cache of the machine learner"""
return super().reset()
def delete_training_set(self):
"""Deletes all of the training data in the database"""
return super().delete_training_set()
def delete_by_key(self, key):
"""Deletes all of the training data in the database that was added with a given key"""
        return super().delete_by_key(key)
def delete_by_id(self, the_id):
"""Deletes the entry in the training data with the given ID"""
return super().delete_by_id(the_id)
def set_dependent_by_id(self, the_id, depend):
"""Sets the dependent variable for the entry in the training data with the given ID"""
return super().set_dependent_by_id(the_id, depend)
def classified_entries(self, key=None):
"""Returns a list of entries in the data that have been classified."""
return super().classified_entries(key=key)
def unclassified_entries(self, key=None):
"""Returns a list of entries in the data that have not yet been classified."""
return super().unclassified_entries(key=key)
def one_unclassified_entry(self, key=None):
"""Returns the first entry in the data that has not yet been classified, or None if all entries have been classified."""
return super().one_unclassified_entry(key=key)
def retrieve_by_id(self, the_id):
"""Returns the entry in the data that has the given ID."""
return super().retrieve_by_id(the_id)
def save_for_classification(self, indep, key=None, info=None):
"""Creates a not-yet-classified entry in the data for the given independent variable and returns the ID of the entry."""
indep = process_independent_data(indep)
return super().save_for_classification(indep, key=key, info=info)
def add_to_training_set(self, indep, depend, key=None, info=None):
"""Creates an entry in the data for the given independent and dependent variable and returns the ID of the entry."""
indep = process_independent_data(indep)
return super().add_to_training_set(indep, depend, key=key, info=info)
def is_empty(self):
"""Returns True if no data have been defined, otherwise returns False."""
return super().is_empty()
def dependent_in_use(self, key=None):
"""Returns a sorted list of unique dependent variables in the data."""
return super().dependent_in_use(key=key)
def export_training_set(self, output_format='json'):
"""Returns the classified entries in the data as JSON or YAML."""
return super().export_training_set(output_format=output_format)
def new_entry(self, **kwargs):
"""Creates a new entry in the data."""
return super().new_entry(**kwargs)
# def export_training_sets(prefix, output_format='json'):
# output = dict()
# re_prefix = re.compile(r'^' + prefix + ':')
# for record in db.session.query(MachineLearning).filter(MachineLearning.group_id.like(prefix + '%')).group_by(MachineLearning.group_id):
# the_group_id = re_prefix.sub('', record.group_id)
# output[the_group_id].append(dict(independent=record.independent, dependent=record.dependent))
# if output_format == 'json':
# return json.dumps(output, sort_keys=True, indent=4)
# elif output_format == 'yaml':
# return yaml.safe_dump(output, default_flow_style=False)
# else:
# raise Exception("Unknown output format " + str(output_format))
def process_independent_data(data):
result = dict()
for key, val in data.items():
if isinstance(val, DADict) or type(val) is dict:
for subkey, subval in val.items():
if not isinstance(subval, (str, bool, int, float)):
raise Exception('RandomForestMachineLearner: invalid data type ' + subval.__class__.__name__ + ' in data')
result[key + '_' + subkey] = subval
else:
if not isinstance(val, (str, bool, int, float)):
raise Exception('RandomForestMachineLearner: invalid data type ' + val.__class__.__name__ + ' in data')
result[key] = val
return result
| mit |
kazemakase/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
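# add uniform jitter so the classes overlap a little while staying roughly linearly separable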
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
akrherz/idep | scripts/cligen/qc_summarize.py | 2 | 7378 | """Need something that prints diagnostics of our climate file"""
import sys
import datetime
import numpy as np
import netCDF4
import pytz
import pandas as pd
import requests
from pyiem.dep import read_cli
from pyiem.iemre import hourly_offset
from pyiem.util import c2f, mm2inch
def compute_stage4(lon, lat, year):
"""Build a daily dataframe for the stage4 data"""
nc = netCDF4.Dataset("/mesonet/data/stage4/%s_stage4_hourly.nc" % (year,))
lons = nc.variables["lon"][:]
lats = nc.variables["lat"][:]
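# find the Stage IV grid cell nearest to (lon, lat) by minimizing the Euclidean distance in lon/lat space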
dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5
(yidx, xidx) = np.unravel_index(dist.argmin(), dist.shape)
print(
("Computed stage4 nclon:%.2f nclat:%.2f yidx:%s xidx:%s ")
% (lons[yidx, xidx], lats[yidx, xidx], yidx, xidx)
)
p01i = mm2inch(nc.variables["p01m"][:, yidx, xidx])
nc.close()
df = pd.DataFrame(
{"precip": 0.0},
index=pd.date_range(
"%s-01-01" % (year,), "%s-12-31" % (year,), tz="America/Chicago"
),
)
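# map each local (America/Chicago) calendar day onto its 24-hour window of UTC hourly offsets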
for date in df.index.values:
date2 = datetime.datetime.utcfromtimestamp(date.tolist() / 1e9)
ts = datetime.datetime(date2.year, date2.month, date2.day, 6)
ts = ts.replace(tzinfo=pytz.utc)
ts = ts.astimezone(pytz.timezone("America/Chicago"))
ts = ts.replace(hour=0)
ts = ts.astimezone(pytz.utc)
tidx = hourly_offset(ts)
# values are in arrears
val = np.ma.sum(p01i[tidx + 1 : tidx + 25])
if val > 0:
df.at[date, "precip"] = val # close enough
return df
def fn2lonlat(filename):
"""Convert the filename to lon and lat"""
tokens = filename.split("/")[-1].rsplit(".", 1)[0].split("x")
return [0 - float(tokens[0]), float(tokens[1])]
def do_qc(fn, df, year):
"""Run some checks on this dataframe"""
(lon, lat) = fn2lonlat(fn)
stage4 = compute_stage4(lon, lat, year)
# Does the frame appear to have all dates?
if len(df.index) != len(df.resample("D").mean().index):
print("ERROR: Appears to be missing dates!")
if open(fn).read()[-1] != "\n":
print("ERROR: File does not end with \\n")
print("--------- Summary stats from the .cli file")
print("YEAR | RAIN | MAXRATE | MAXACC | #DAYS | #>1RT | RAD/D")
print(" --- | --- | --- | --- | --- | --- | ---")
for _year, gdf in df.groupby(by=df.index.year):
print(
("%s | %6.2f | %7.2f | %7.2f | %6i | %6i | %6.0f")
% (
_year,
mm2inch(gdf["pcpn"].sum()),
mm2inch(gdf["maxr"].max()),
mm2inch(gdf["pcpn"].max()),
len(gdf[gdf["pcpn"] > 0].index),
len(gdf[gdf["maxr"] > 25.4].index),
gdf["rad"].mean(),
)
)
print("---- Months with < 0.05 precipitation ----")
gdf = df.groupby(by=[df.index.year, df.index.month])["pcpn"].sum()
print(gdf[gdf < 1.0])
print("----- Average high temperature -----")
print("YEAR | Avg High F | Avg Low F | Days > 100F")
print(" --- | --- | --- | ---")
for _year, gdf in df.groupby(by=df.index.year):
print(
("%s | %6.2f | %6.2f | %3i")
% (
_year,
c2f(gdf["tmax"].mean()),
c2f(gdf["tmin"].mean()),
len(gdf[gdf["tmax"] > 37.7].index),
)
)
monthly = df[df.index.year == year]["pcpn"].resample("M").sum().copy()
monthly = pd.DataFrame(
{"dep": mm2inch(monthly.values)}, index=range(1, 13)
)
# Get prism, for a bulk comparison
prism = requests.get(
(
"http://mesonet.agron.iastate.edu/json/prism/"
"%.2f/%.2f/%s0101-%s1231"
)
% (lon, lat, year, year)
).json()
rows = []
for entry in prism["data"]:
rows.append(
{
"date": datetime.datetime.strptime(
entry["valid"][:10], "%Y-%m-%d"
),
"precip": entry["precip_in"],
}
)
prismdf = pd.DataFrame(rows)
prismdf.set_index("date", inplace=True)
monthly["prism"] = prismdf["precip"].resample("M").sum().copy().values
# Compare daily values
iemjson = requests.get(
(
"http://mesonet.agron.iastate.edu/iemre/multiday/"
"%s-01-01/%s-12-31/%s/%s/json"
)
% (year, year, lat, lon)
).json()
rows = []
for entry in iemjson["data"]:
rows.append(
{
"date": datetime.datetime.strptime(entry["date"], "%Y-%m-%d"),
"precip": entry["daily_precip_in"],
}
)
iemdf = pd.DataFrame(rows)
iemdf.set_index("date", inplace=True)
print("PRISM %s precip is: %.2f" % (year, prismdf["precip"].sum()))
print("IEMRE sum precip is: %.2f" % (iemdf["precip"].sum(),))
print("StageIV sum precip is: %.2f" % (stage4["precip"].sum(),))
monthly["stage4"] = stage4["precip"].resample("M").sum().copy().values
monthly["iemre"] = iemdf["precip"].resample("M").sum().copy().values
monthly["prism-dep"] = monthly["prism"] - monthly["dep"]
monthly["iemre-dep"] = monthly["iemre"] - monthly["dep"]
print(" --------- %s Monthly Totals --------" % (year,))
print(monthly)
df.at[
slice(datetime.date(year, 1, 1), datetime.date(year, 12, 31)),
"stage4_precip",
] = stage4["precip"].values
df["iemre_precip"] = iemdf["precip"]
df["diff_precip"] = df["pcpn_in"] - df["iemre_precip"]
df["diff_stage4"] = df["pcpn_in"] - df["stage4_precip"]
print(" --- Top 5 Largest DEP > IEMRE ----")
print(
df[
[
"diff_precip",
"pcpn_in",
"iemre_precip",
"stage4_precip",
"diff_stage4",
]
]
.sort_values(by="diff_precip", ascending=False)
.head()
)
print(" --- Top 5 Largest IEMRE > DEP ----")
print(
df[
[
"diff_precip",
"pcpn_in",
"iemre_precip",
"stage4_precip",
"diff_stage4",
]
]
.sort_values(by="diff_precip", ascending=True)
.head()
)
print(" --- Top 10 Largest Stage4 > DEP ----")
print(
df[
[
"diff_precip",
"pcpn_in",
"iemre_precip",
"stage4_precip",
"diff_stage4",
]
]
.sort_values(by="diff_stage4", ascending=True)
.head(10)
)
print(" vvv job listing based on the above vvv")
for dt in df.sort_values(by="diff_stage4", ascending=True).head(10).index:
print(
"python daily_clifile_editor.py 0 %s %s %s"
% (dt.year, dt.month, dt.day)
)
df2 = df.loc[slice(datetime.date(year, 1, 1), datetime.date(year, 1, 31))][
["diff_precip", "pcpn_in", "iemre_precip", "stage4_precip"]
].sort_values(by="diff_precip")
print(" --- Daily values for the month ---")
print(df2)
def main(argv):
"""Do Stuff"""
fn = argv[1]
year = int(argv[2])
df = read_cli(fn)
df["pcpn_in"] = mm2inch(df["pcpn"].values)
do_qc(fn, df, year)
if __name__ == "__main__":
main(sys.argv)
| mit |
LSSTDESC/Twinkles | python/desc/twinkles/sprinkler.py | 2 | 28310 | '''
Created on Feb 6, 2015
@author: cmccully
'''
from __future__ import absolute_import, division, print_function
from future.utils import iteritems
import time
import om10
import numpy as np
import re
import json
import os
import pandas as pd
import copy
import gzip
import shutil
from lsst.utils import getPackageDir
from lsst.sims.utils import SpecMap, defaultSpecMap
from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj
from lsst.sims.catUtils.matchSED import matchBase
from lsst.sims.photUtils import Bandpass, BandpassDict, Sed
from lsst.sims.utils import radiansFromArcsec
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['sprinklerCompound', 'sprinkler']
class sprinklerCompound(GalaxyTileCompoundObj):
objid = 'sprinklerCompound'
objectTypeId = 66
cached_sprinkling = False
agn_cache_file = None
sne_cache_file = None
defs_file = None
sed_path = None
def _final_pass(self, results):
#From the original GalaxyTileCompoundObj final pass method
for name in results.dtype.fields:
if 'raJ2000' in name or 'decJ2000' in name:
results[name] = np.radians(results[name])
# the stored procedure on fatboy that queries the galaxies
# constructs galtileid by taking
#
# tileid*10^8 + galid
#
# this causes galtileid to be so large that the uniqueIDs in the
# Twinkles InstanceCatalogs are too large for PhoSim to handle.
# Since Twinkles is only focused on one tile on the sky, we will remove
# the factor of 10^8, making the uniqueIDs a more manageable size
# results['galtileid'] = results['galtileid']#%100000000
#Use Sprinkler now
sp = sprinkler(results, self.mjd, self.specFileMap, self.sed_path,
density_param=1.0,
cached_sprinkling=self.cached_sprinkling,
agn_cache_file=self.agn_cache_file,
sne_cache_file=self.sne_cache_file,
defs_file=self.defs_file)
results = sp.sprinkle()
return results
class sprinkler():
def __init__(self, catsim_cat, visit_mjd, specFileMap, sed_path,
om10_cat='twinkles_lenses_v2.fits',
sne_cat='dc2_sne_cat.csv', density_param=1., cached_sprinkling=False,
agn_cache_file=None, sne_cache_file=None, defs_file=None,
write_sn_sed=True):
"""
Parameters
----------
catsim_cat: catsim catalog
The results array from an instance catalog.
visit_mjd: float
The mjd of the visit
specFileMap:
This will tell the instance catalog where to write the files
sed_path: str
This tells where to write out SNe SED files
om10_cat: optional, defaults to 'twinkles_lenses_v2.fits
fits file with OM10 catalog
sne_cat: optional, defaults to 'dc2_sne_cat.csv'
density_param: `np.float`, optional, defaults to 1.0
the fraction of eligible agn objects that become lensed and should
be between 0.0 and 1.0.
cached_sprinkling: boolean
If true then pick from a preselected list of galtileids
agn_cache_file: str
sne_cache_file: str
defs_file: str
write_sn_sed: boolean
Controls whether or not to actually write supernova
SEDs to disk (default=True)
Returns
-------
input_catalog:
results array with lens systems added.
"""
t_start = time.time()
twinklesDir = getPackageDir('Twinkles')
om10_cat = os.path.join(twinklesDir, 'data', om10_cat)
self.write_sn_sed = write_sn_sed
self.catalog_column_names = catsim_cat.dtype.names
# ****** THIS ASSUMES THAT THE ENVIRONMENT VARIABLE OM10_DIR IS SET *******
lensdb = om10.DB(catalog=om10_cat, vb=False)
self.lenscat = lensdb.lenses.copy()
self.density_param = density_param
self.bandpassDict = BandpassDict.loadTotalBandpassesFromFiles(bandpassNames=['i'])
self.lsst_band_indexes = {'u':0, 'g':1, 'r':2, 'i':3, 'z':4, 'y':5}
self.sne_catalog = pd.read_csv(os.path.join(twinklesDir, 'data', sne_cat))
#self.sne_catalog = self.sne_catalog.iloc[:101] ### Remove this after testing
self.used_systems = []
self._visit_mjd = visit_mjd
self.sn_obj = SNObject(0., 0.)
self.write_dir = specFileMap.subdir_map['(^specFileGLSN)']
self.sed_path = sed_path
self.cached_sprinkling = cached_sprinkling
if self.cached_sprinkling is True:
if ((agn_cache_file is None) | (sne_cache_file is None)):
raise AttributeError('Must specify cache files if using cached_sprinkling.')
#agn_cache_file = os.path.join(twinklesDir, 'data', 'test_agn_galtile_cache.csv')
self.agn_cache = pd.read_csv(agn_cache_file)
#sne_cache_file = os.path.join(twinklesDir, 'data', 'test_sne_galtile_cache.csv')
self.sne_cache = pd.read_csv(sne_cache_file)
else:
self.agn_cache = None
self.sne_cache = None
if defs_file is None:
self.defs_file = os.path.join(twinklesDir, 'data', 'catsim_defs.csv')
else:
self.defs_file = defs_file
self.sedDir = getPackageDir('sims_sed_library')
self.imSimBand = Bandpass()
self.imSimBand.imsimBandpass()
#self.LRG_name = 'Burst.25E09.1Z.spec'
#self.LRG = Sed()
#self.LRG.readSED_flambda(str(galDir + self.LRG_name))
#return
#Calculate imsimband magnitudes of source galaxies for matching
agn_fname = str(getPackageDir('sims_sed_library') + '/agnSED/agn.spec.gz')
src_iband = self.lenscat['MAGI_IN']
src_z = self.lenscat['ZSRC']
self.src_mag_norm = []
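# compute an imsim-band magNorm for each OM10 source by matching its i-band magnitude to a redshifted AGN SED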
for src, s_z in zip(src_iband, src_z):
agn_sed = Sed()
agn_sed.readSED_flambda(agn_fname)
agn_sed.redshiftSED(s_z, dimming=True)
self.src_mag_norm.append(matchBase().calcMagNorm([src],
agn_sed,
self.bandpassDict))
#self.src_mag_norm = matchBase().calcMagNorm(src_iband,
# [agn_sed]*len(src_iband),
#
# self.bandpassDict)
has_sn_truth_params = False
for name in self.catalog_column_names:
if 'sn_truth_params' in name:
has_sn_truth_params = True
break
self.defs_dict = {}
self.logging_is_sprinkled = False
self.store_sn_truth_params = False
with open(self.defs_file, 'r') as f:
for line in f:
line_defs = line.strip().split(',')
if len(line_defs) > 1:
if 'is_sprinkled' in line_defs[1]:
self.logging_is_sprinkled = True
if 'sn_truth_params' in line_defs[1] and has_sn_truth_params:
self.store_sn_truth_params = True
if len(line_defs) == 2:
self.defs_dict[line_defs[0]] = line_defs[1]
else:
self.defs_dict[line_defs[0]] = tuple((ll for ll in line_defs[1:]))
duration = time.time()-t_start
duration /= 3600.0
print('initialized sprinkler in %e hours' % duration)
@property
def visit_mjd(self):
return self._visit_mjd
@visit_mjd.setter
def visit_mjd(self, val):
self._visit_mjd = val
def sprinkle(self, input_catalog, catalog_band):
# Define a list that we can write out to a text file
lenslines = []
# For each galaxy in the catsim catalog
if isinstance(self.defs_dict['galtileid'], tuple):
galid_dex = self.defs_dict['galtileid'][0]
else:
galid_dex = self.defs_dict['galtileid']
agn_magnorm_dex = self.defs_dict['galaxyAgn_magNorm']
agn_magnorm_array = np.array([row[agn_magnorm_dex] for row in input_catalog])
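# rows with NaN AGN magNorm carry no AGN component; those become the candidates for lensed SNe instead of lensed AGN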
nan_magnorm = np.isnan(agn_magnorm_array)
if self.cached_sprinkling:
if not hasattr(self, '_unq_agn_gid'):
self._unq_agn_gid = np.unique(self.agn_cache['galtileid'].values)
self._unq_sne_gid = np.unique(self.sne_cache['galtileid'].values)
galtileid_array = np.array([row[galid_dex] for row in input_catalog])
valid_agn = np.where(np.logical_and(np.logical_not(nan_magnorm),
np.in1d(galtileid_array,
self._unq_agn_gid,
assume_unique=True)))[0]
valid_sne = np.where(np.logical_and(nan_magnorm,
np.in1d(galtileid_array,
self._unq_sne_gid,
assume_unique=True)))[0]
else:
valid_agn = np.where(np.logical_not(nan_magnorm))[0]
valid_sne = np.where(nan_magnorm)[0]
new_rows = []
# print("Running sprinkler. Catalog Length: ", len(input_catalog))
for rowNum in valid_agn:
row = input_catalog[rowNum]
galtileid = row[galid_dex]
if not self.cached_sprinkling:
candidates = self.find_lens_candidates(row[self.defs_dict['galaxyAgn_redshift']],
row[self.defs_dict['galaxyAgn_magNorm']])
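# seed the RNG with the galaxy tile id so the sprinkling decision is reproducible for each galaxy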
rng = np.random.RandomState(galtileid % (2**32 -1))
pick_value = rng.uniform()
if len(candidates) == 0 or pick_value>self.density_param:
# If there aren't any lensed sources at this redshift from
# OM10 move on the next object
continue
# Randomly choose one the lens systems
# (can decide with or without replacement)
# Sort first to make sure the same choice is made every time
candidates = candidates[np.argsort(candidates['twinklesId'])]
newlens = rng.choice(candidates)
else:
twinkles_sys_cache = self.agn_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values[0]
newlens = self.lenscat[np.where(self.lenscat['twinklesId'] == twinkles_sys_cache)[0]][0]
#varString = json.loads(row[self.defs_dict['galaxyAgn_varParamStr']])
# varString[self.defs_dict['pars']]['t0_mjd'] = 59300.0
#row[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
# Append the lens galaxy
# For each image, append the lens images
default_lensrow = None
if newlens['NIMG'] > 0:
default_lensrow = row.copy()
default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999.
default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999.
default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_redshift']] = newlens['ZSRC']
default_lensrow[self.defs_dict['galaxyDisk_redshift']] = newlens['ZSRC']
default_lensrow[self.defs_dict['galaxyAgn_redshift']] = newlens['ZSRC']
for i in range(newlens['NIMG']):
lensrow = default_lensrow.copy()
# XIMG and YIMG are in arcseconds
# raPhSim and decPhoSim are in radians
# Shift all parts of the lensed object,
# not just its agn part
delta_dec = np.radians(newlens['YIMG'][i] / 3600.0)
delta_ra = np.radians(newlens['XIMG'][i] / 3600.0)
lens_ra = lensrow[self.defs_dict['raJ2000']]
lens_dec = lensrow[self.defs_dict['decJ2000']]
lensrow[self.defs_dict['raJ2000']] = lens_ra + delta_ra/np.cos(lens_dec)
lensrow[self.defs_dict['decJ2000']] = lens_dec + delta_dec
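# convert the image magnification into a magnitude shift, 2.5*log10(|MAG|), applied to the AGN magNorm below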
mag_adjust = 2.5*np.log10(np.abs(newlens['MAG'][i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
varString[self.defs_dict['pars']]['t0Delay'] = newlens['DELAY'][i]
varString[self.defs_dict['varMethodName']] = 'applyAgnTimeDelay'
lensrow[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*100000 +
newlens['twinklesId']*8 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*100000 +
newlens['twinklesId']*8 + i)
new_rows.append(lensrow)
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999.
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None
row[self.defs_dict['galaxyDisk_magNorm']] = 999.
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = newlens['lens_sed']
row[self.defs_dict['galaxyBulge_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyDisk_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyAgn_redshift']] = newlens['ZLENS']
row_lens_sed = Sed()
row_lens_sed.readSED_flambda(os.path.join(self.sedDir,
newlens['lens_sed']))
row_lens_sed.redshiftSED(newlens['ZLENS'], dimming=True)
# Get the correct magnorm to maintain galaxy colors
row[self.defs_dict['galaxyBulge_magNorm']] = newlens['sed_magNorm'][self.lsst_band_indexes[catalog_band]]
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(newlens['REFF'] / np.sqrt(1 - newlens['ELLIP']))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(newlens['REFF'] * np.sqrt(1 - newlens['ELLIP']))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = newlens['PHIE']*(-1.0)*np.pi/180.0
row[self.defs_dict['galaxyBulge_internalAv']] = newlens['lens_av']
row[self.defs_dict['galaxyBulge_internalRv']] = newlens['lens_rv']
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
input_catalog[rowNum] = row
for rowNum in valid_sne:
row = input_catalog[rowNum]
galtileid = row[galid_dex]
if self.cached_sprinkling is True:
if galtileid in self.sne_cache['galtileid'].values:
use_system = self.sne_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
self.used_systems.append(use_system)
else:
continue
else:
lens_sne_candidates = self.find_sne_lens_candidates(row[self.defs_dict['galaxyDisk_redshift']])
candidate_sysno = np.unique(lens_sne_candidates['twinkles_sysno'])
num_candidates = len(candidate_sysno)
if num_candidates == 0:
continue
used_already = np.array([sys_num in self.used_systems for sys_num in candidate_sysno])
unused_sysno = candidate_sysno[~used_already]
if len(unused_sysno) == 0:
continue
rng2 = np.random.RandomState(galtileid % (2**32 -1))
use_system = rng2.choice(unused_sysno)
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
default_lensrow = row.copy()
default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999.
default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999.
default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
varString = 'None'
default_lensrow[self.defs_dict['galaxyAgn_varParamStr']] = varString
for i in range(len(use_df)):
lensrow = default_lensrow.copy()
delta_ra = np.radians(use_df['x'].iloc[i] / 3600.0)
delta_dec = np.radians(use_df['y'].iloc[i] / 3600.0)
lens_ra = lensrow[self.defs_dict['raJ2000']]
lens_dec = lensrow[self.defs_dict['decJ2000']]
lensrow[self.defs_dict['raJ2000']] = lens_ra + delta_ra/np.cos(lens_dec)
lensrow[self.defs_dict['decJ2000']] = lens_dec + delta_dec
# varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
z_s = use_df['zs'].iloc[i]
lensrow[self.defs_dict['galaxyBulge_redshift']] = z_s
lensrow[self.defs_dict['galaxyDisk_redshift']] = z_s
lensrow[self.defs_dict['galaxyAgn_redshift']] = z_s
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*100000 +
use_system*8 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*100000 +
use_system*8 + i)
(add_to_cat, sn_magnorm,
sn_fname, sn_param_dict) = self.create_sn_sed(use_df.iloc[i],
lensrow[self.defs_dict['raJ2000']],
lensrow[self.defs_dict['decJ2000']],
self.visit_mjd,
write_sn_sed=self.write_sn_sed)
if self.store_sn_truth_params:
add_to_cat = True
lensrow[self.defs_dict['galaxyAgn_sn_truth_params']] = json.dumps(sn_param_dict)
lensrow[self.defs_dict['galaxyAgn_sn_t0']] = sn_param_dict['t0']
lensrow[self.defs_dict['galaxyAgn_sedFilename']] = sn_fname
lensrow[self.defs_dict['galaxyAgn_magNorm']] = sn_magnorm #This will need to be adjusted to proper band
mag_adjust = 2.5*np.log10(np.abs(use_df['mu'].iloc[i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
if add_to_cat is True:
new_rows.append(lensrow)
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999.
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None
row[self.defs_dict['galaxyDisk_magNorm']] = 999.
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = use_df['lensgal_sed'].iloc[0]
row[self.defs_dict['galaxyBulge_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyDisk_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyAgn_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyBulge_magNorm']] = use_df['lensgal_magnorm_%s' % catalog_band].iloc[0]
# row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], self.LRG, self.bandpassDict) #Changed from i band to imsimband
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] / np.sqrt(1 - use_df['e'].iloc[0]))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] * np.sqrt(1 - use_df['e'].iloc[0]))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = use_df['theta_e'].iloc[0]*(-1.0)*np.pi/180.0
row[self.defs_dict['galaxyBulge_internalAv']] = use_df['lens_av'].iloc[0]
row[self.defs_dict['galaxyBulge_internalRv']] = use_df['lens_rv'].iloc[0]
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
input_catalog[rowNum] = row
if len(new_rows)>0:
input_catalog = np.append(input_catalog, new_rows)
return input_catalog
def find_lens_candidates(self, galz, gal_mag):
# search the OM10 catalog for all sources +- 0.1 dex in redshift
# and within .25 mags of the CATSIM source
w = np.where((np.abs(np.log10(self.lenscat['ZSRC']) - np.log10(galz)) <= 0.1) &
(np.abs(self.src_mag_norm - gal_mag) <= .25))[0]
lens_candidates = self.lenscat[w]
return lens_candidates
def find_sne_lens_candidates(self, galz):
w = np.where((np.abs(np.log10(self.sne_catalog['zs']) - np.log10(galz)) <= 0.1))
lens_candidates = self.sne_catalog.iloc[w]
return lens_candidates
def create_sn_sed(self, system_df, sn_ra, sn_dec, sed_mjd, write_sn_sed=True):
sn_param_dict = copy.deepcopy(self.sn_obj.SNstate)
sn_param_dict['_ra'] = sn_ra
sn_param_dict['_dec'] = sn_dec
sn_param_dict['z'] = system_df['zs']
sn_param_dict['c'] = system_df['c']
sn_param_dict['x0'] = system_df['x0']
sn_param_dict['x1'] = system_df['x1']
sn_param_dict['t0'] = system_df['t_start']
#sn_param_dict['t0'] = 62746.27 #+1500. ### For testing only
current_sn_obj = self.sn_obj.fromSNState(sn_param_dict)
current_sn_obj.mwEBVfromMaps()
wavelen_max = 1800.
wavelen_min = 30.
wavelen_step = 0.1
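# evaluate the supernova SED on a fixed wavelength grid at the visit MJD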
sn_sed_obj = current_sn_obj.SNObjectSED(time=sed_mjd,
wavelen=np.arange(wavelen_min, wavelen_max,
wavelen_step))
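# only keep the image if it has measurable flux near 500 nm at this epoch; otherwise skip writing an SED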
flux_500 = sn_sed_obj.flambda[np.where(sn_sed_obj.wavelen >= 499.99)][0]
if flux_500 > 0.:
add_to_cat = True
sn_magnorm = current_sn_obj.catsimBandMag(self.imSimBand, sed_mjd)
sn_name = None
if write_sn_sed:
sn_name = 'specFileGLSN_%i_%i_%.4f.txt' % (system_df['twinkles_sysno'],
system_df['imno'], sed_mjd)
sed_filename = '%s/%s' % (self.sed_path, sn_name)
sn_sed_obj.writeSED(sed_filename)
with open(sed_filename, 'rb') as f_in, gzip.open(str(sed_filename + '.gz'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(sed_filename)
else:
add_to_cat = False
sn_magnorm = np.nan
sn_name = None
return add_to_cat, sn_magnorm, sn_name, current_sn_obj.SNstate
def update_catsim(self):
# Remove the catsim object
# Add lensed images to the catsim given source brightness and magnifications
# Add lens galaxy to catsim
return
def catsim_to_phosim(self):
# Pass this catsim to phosim to make images
return
| mit |
macks22/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
idlead/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
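# when fixed_n_classes is given, the first labeling acts as a fixed ground-truth assignment across all runs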
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
vrv/tensorflow | tensorflow/examples/learn/text_classification.py | 39 | 5106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
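# learn the vocabulary from the training text and map each document to a fixed-length sequence of word ids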
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
evan-magnusson/dynamic | Data/Calibration/Firm_Calibration_Python/parameters/employment/script_wages.py | 6 | 1821 | '''
-------------------------------------------------------------------------------
Date created: 5/22/2015
Last updated 5/22/2015
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Packages:
-------------------------------------------------------------------------------
'''
import os.path
import sys
import numpy as np
import pandas as pd
# Find the directory of this file:
cur_dir = os.path.dirname(__file__)
# Import naics processing file:
try:
import naics_processing as naics
except ImportError:
data_struct_dir = os.path.dirname(os.path.dirname(cur_dir))
data_struct_dir += "\\data_structures"
data_struct_dir = os.path.abspath(data_struct_dir)
sys.path.append(data_struct_dir)
try:
import naics_processing as naics
except ImportError:
print "\n\n ImportError: Failed to import naics_processing \n\n"
# Import the helper functions to read in the national income data:
import read_wages_data as read_wages
'''
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
'''
data_folder = os.path.abspath(cur_dir + "\\data")
naics_codes_file = os.path.abspath(data_folder + "\\NAICS_Codes.csv")
output_folder = os.path.abspath(cur_dir + "\\output")
def main():
#
naics_tree = naics.load_naics(naics_codes_file)
#
read_wages.load_nipa_wages_ind(data_folder, naics_tree)
#
parameters = [read_wages.WAGES]
#
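# propagate the WAGES parameter through the NAICS tree: pop_back aggregates upward, pop_forward distributes downward (assumed behavior of the naics_processing helpers)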
naics.pop_back(naics_tree, parameters)
naics.pop_forward(naics_tree, parameters, None, None, None, True)
#
naics.print_tree_dfs(naics_tree, output_folder)
if __name__ == "script_wages":
main()
| mit |
EVEprosper/ProsperAPI | tests/test_crest_endpoint.py | 1 | 14676 | from os import path, listdir, remove
import platform
import io
from datetime import datetime, timedelta
import time
import json
import pandas as pd
from tinymongo import TinyMongoClient
import pytest
from flask import url_for
import publicAPI.exceptions as exceptions
import publicAPI.config as api_utils
import helpers
HERE = path.abspath(path.dirname(__file__))
ROOT = path.dirname(HERE)
CONFIG_FILENAME = path.join(HERE, 'test_config.cfg')
CONFIG = helpers.get_config(CONFIG_FILENAME)
ROOT_CONFIG = helpers.get_config(
path.join(ROOT, 'scripts', 'app.cfg')
)
TEST_CACHE_PATH = path.join(HERE, 'cache')
CACHE_PATH = path.join(ROOT, 'publicAPI', 'cache')
BASE_URL = 'http://localhost:8000'
def test_clear_caches():
"""remove cache files for test"""
helpers.clear_caches(True)
VIRGIN_RUNTIME = None
@pytest.mark.usefixtures('client_class')
class TestODBCcsv:
"""test framework for collecting endpoint stats"""
def test_odbc_happypath(self):
"""exercise `collect_stats`"""
global VIRGIN_RUNTIME
fetch_start = time.time()
req = self.client.get(
url_for('ohlc_endpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
fetch_end = time.time()
VIRGIN_RUNTIME = fetch_end - fetch_start
print(req.__dict__)
data = None
with io.StringIO(req.data.decode()) as buff:
data = pd.read_csv(buff)
assert req._status_code == 200
expected_headers = [
'date',
'open',
'high',
'low',
'close',
'volume'
]
assert set(expected_headers) == set(data.columns.values)
def test_odbc_happypath_cached(self):
"""rerun test with cached values"""
fetch_start = time.time()
req = self.client.get(
url_for('ohlc_endpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
fetch_end = time.time()
runtime = fetch_end - fetch_start
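# a cache hit should be noticeably faster than the first, uncached request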
if runtime > VIRGIN_RUNTIME/1.5:
pytest.xfail('cached performance slower than expected')
def test_odbc_bad_typeid(self):
"""make sure expected errors happen on bad typeid"""
req = self.client.get(
url_for('ohlc_endpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'bad_typeid'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
assert req._status_code == 404
def test_odbc_bad_regionid(self):
"""make sure expected errors happen on bad typeid"""
req = self.client.get(
url_for('ohlc_endpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'bad_regionid')
)
)
assert req._status_code == 404
def test_odbc_bad_format(self):
"""make sure expected errors happen on bad typeid"""
req = self.client.get(
url_for('ohlc_endpoint', return_type='butts') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
assert req._status_code == 405
@pytest.mark.usefixtures('client_class')
class TestODBCjson:
"""test framework for collecting endpoint stats"""
def test_odbc_happypath(self):
"""exercise `collect_stats`"""
test_clear_caches()
global VIRGIN_RUNTIME
fetch_start = time.time()
req = self.client.get(
url_for('ohlc_endpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
fetch_end = time.time()
VIRGIN_RUNTIME = fetch_end - fetch_start
raw_data = json.loads(req.data.decode())
data = pd.DataFrame(raw_data)
assert req._status_code == 200
expected_headers = [
'date',
'open',
'high',
'low',
'close',
'volume'
]
assert set(expected_headers) == set(data.columns.values)
def test_odbc_bad_typeid(self):
"""make sure expected errors happen on bad typeid"""
req = self.client.get(
url_for('ohlc_endpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'bad_typeid'),
region_id=CONFIG.get('TEST', 'region_id')
)
)
assert req._status_code == 404
def test_odbc_bad_regionid(self):
"""make sure expected errors happen on bad typeid"""
req = self.client.get(
url_for('ohlc_endpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'bad_regionid')
)
)
assert req._status_code == 404
TEST_API_KEY = ''
def test_get_api_key():
"""fetch api key from cache for testing"""
global TEST_API_KEY
connection = TinyMongoClient(CACHE_PATH)
api_db = connection.prosperAPI.users
vals = api_db.find()
if not vals:
pytest.xfail('Unable to test without test keys')
test_key = vals['api_key']
connection.close()
TEST_API_KEY = test_key
@pytest.mark.prophet
@pytest.mark.usefixtures('client_class')
class TestProphetcsv:
"""test framework for collecting endpoint stats"""
def test_prophet_happypath(self):
"""exercise `collect_stats`"""
test_clear_caches()
assert TEST_API_KEY != ''
global VIRGIN_RUNTIME
fetch_start = time.time()
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
fetch_end = time.time()
VIRGIN_RUNTIME = fetch_end - fetch_start
data = None
with io.StringIO(req.data.decode()) as buff:
data = pd.read_csv(buff)
assert req._status_code == 200
expected_headers = [
'date',
'avgPrice',
'yhat',
'yhat_low',
'yhat_high',
'prediction'
]
assert set(expected_headers) == set(data.columns.values)
##TODO: validate ranges?
def test_prophet_happypath_cached(self):
"""exercise `collect_stats`"""
fetch_start = time.time()
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
fetch_end = time.time()
runtime = fetch_end - fetch_start
if runtime > VIRGIN_RUNTIME/1.5:
pytest.xfail('cached performance slower than expected')
def test_prophet_bad_regionid(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'bad_regionid'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 404
def test_prophet_bad_typeid(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'bad_typeid'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 404
def test_prophet_bad_api(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key='IMAHUGEBUTT',
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 401
def test_prophet_bad_range(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='csv') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=9001
)
)
assert req._status_code == 413
def test_prophet_bad_format(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='butts') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 405
@pytest.mark.prophet
@pytest.mark.usefixtures('client_class')
class TestProphetjson:
"""test framework for collecting endpoint stats"""
def test_prophet_happypath(self):
"""exercise `collect_stats`"""
test_clear_caches()
global VIRGIN_RUNTIME
fetch_start = time.time()
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
fetch_end = time.time()
VIRGIN_RUNTIME = fetch_end - fetch_start
raw_data = json.loads(req.data.decode())
data = pd.DataFrame(raw_data)
assert req._status_code == 200
expected_headers = [
'date',
'avgPrice',
'yhat',
'yhat_low',
'yhat_high',
'prediction'
]
assert set(expected_headers) == set(data.columns.values)
##TODO: validate ranges?
def test_prophet_happypath_cached(self):
"""exercise `collect_stats`"""
fetch_start = time.time()
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
fetch_end = time.time()
runtime = fetch_end - fetch_start
if runtime > VIRGIN_RUNTIME/1.5:
pytest.xfail('cached performance slower than expected')
def test_prophet_bad_regionid(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'bad_regionid'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 404
def test_prophet_bad_typeid(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'bad_typeid'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 404
def test_prophet_bad_api(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
'?typeID={type_id}®ionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key='IMAHUGEBUTT',
range=CONFIG.get('TEST', 'forecast_range')
)
)
assert req._status_code == 401
def test_prophet_bad_range(self):
"""exercise `collect_stats`"""
req = self.client.get(
url_for('prophetendpoint', return_type='json') +
            '?typeID={type_id}&regionID={region_id}&api={api_key}&range={range}'.format(
type_id=CONFIG.get('TEST', 'nosplit_id'),
region_id=CONFIG.get('TEST', 'region_id'),
api_key=TEST_API_KEY,
range=9000
)
)
assert req._status_code == 413
| mit |
themrmax/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 55 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. The results,
reported as a confusion matrix and a series of per-class metrics, will
be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
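# scikit-learn's semi-supervised estimators treat a label of -1 as "unlabeled",
# so only the first 30 points keep their true digit labels from here on.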
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
sanja7s/SR_Twitter | src_COMM/InfoMap_COMM_stats.py | 1 | 26064 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from collections import defaultdict, OrderedDict
import codecs
import matplotlib
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
import networkx as nx
import time
import matplotlib.dates as mdates
import os
from igraph import *
from scipy.stats.stats import pearsonr
from scipy import stats
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
sns.set_style('whitegrid')
WORKING_FOLDER = "../../../DATA/mention_graph/InfoMap"
os.chdir(WORKING_FOLDER)
F_IN = 'ment_comm'
f_in_graph = 'mention_graph_weights.dat'
# DONE
def output_basic_stats_COMM():
num_COMM = 0
COMM_sizes = []
input_file = codecs.open(F_IN, 'r', encoding='utf8')
for line in input_file:
line = line.split()
num_COMM += 1
COMM_sizes.append(len(line))
	print 'Read in InfoMap community output: %d COMM ' % (num_COMM)
	print 'Their sizes in increasing order '
print sorted(COMM_sizes)
print sum(COMM_sizes)
input_file.close()
def find_nodes_in_more_COMM():
nodes_num_COMM = defaultdict(int)
input_file = codecs.open(F_IN, 'r', encoding='utf8')
for line in input_file:
node, comm = line.split()
nodes_num_COMM[int(node)] += 1
#nodes_num_COMM2 = {node: nodes_num_COMM[node] if nodes_num_COMM[node] < 10 else 10 for node in nodes_num_COMM}
sorted_nodes_num_COMM = OrderedDict(sorted(nodes_num_COMM.items(), key=lambda t:t[1]))
print len(sorted_nodes_num_COMM)
return sorted_nodes_num_COMM
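# Illustrative note (inferred from the parsing above, not from the original
# author's docs): each line of the 'ment_comm' file is assumed to hold a
# "node community" pair, e.g.
#
#   42 7
#   42 13
#   99 7
#
# in which case find_nodes_in_more_COMM() returns an OrderedDict mapping each
# node id to the number of communities it appears in, e.g. {99: 1, 42: 2}.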
def read_in_mention_graph():
d = defaultdict(int)
fn = 'mention_graph_weights.dat'
f = open(fn, 'r')
for line in f:
(u1, u2, w) = line.split()
#print u1, u2, w
d[int(u1), int(u2)] = int(w)
return d
def read_in_mention_graph_undir():
d = defaultdict(int)
fn = 'undirected_mention_graph_with_SR_weight.dat'
f = open(fn, 'r')
for line in f:
(u1, u2, w, SR) = line.split()
#print u1, u2, w
if int(u1) > int(u2):
pom = u1
u1 = u2
u2 = pom
if (int(u1), int(u2)) in d:
w1 = d[int(u1), int(u2)]
w2 = min(w1, int(w))
d[int(u1), int(u2)] = int(w2)
else:
d[int(u1), int(u2)] = int(w)
print len(d)
return d
def read_all_users():
d = []
fn = 'mention_graph_weights.dat'
f = open(fn, 'r')
for line in f:
(u1, u2, w) = line.split()
d.append(int(u1))
d.append(int(u2))
d = set(d)
print len(d)
return d
# nodes with highest avg COMM membership
def read_nodes_COMM():
COMM = 0
nodes_COMM = defaultdict(list)
input_file = codecs.open(F_IN, 'r', encoding='utf8')
for line in input_file:
line = line.split()
for node in line:
nodes_COMM[int(node)].append(COMM)
COMM += 1
return nodes_COMM
def find_node_interaction_comm_propensity():
d = read_in_mention_graph_undir()
d_all = read_all_users()
d_weak = read_in_mention_graph()
cnt_edges = 0
node_comm_membership = read_nodes_COMM()
comm_mem = defaultdict(list)
for (u1, u2) in d:
comms_u1 = node_comm_membership[u1]
comms_u2 = node_comm_membership[u2]
shared = len(set(comms_u1).intersection(set(comms_u2)))
comm_mem[shared].append(d[(u1, u2)])
cnt_edges += 1
N = cnt_edges
print N
cnt_edges = 0
comm_mem_weak = defaultdict(list)
for (u1, u2) in d_weak:
comms_u1 = node_comm_membership[u1]
comms_u2 = node_comm_membership[u2]
shared = len(set(comms_u1).intersection(set(comms_u2)))
		comm_mem_weak[shared].append(d_weak[(u1, u2)])
cnt_edges += 1
N_weak = cnt_edges
print N_weak
cnt = 0
"""
print len(d_all)
print len(d_all) * len(d_all)
full_comm_mem = defaultdict(int)
for u1 in d_all:
for u2 in d_all:
if u1 >= u2:
continue
#print u1, u2
comms_u1 = node_comm_membership[u1]
comms_u2 = node_comm_membership[u2]
shared = len(set(comms_u1).intersection(set(comms_u2)))
#shared = cnt
full_comm_mem[shared] += 1
if cnt % 1000000 == 0:
print cnt
cnt += 1
print cnt
#print full_comm_mem
"""
comm_mem2 = OrderedDict()
comm_mem3 = OrderedDict()
comm_mem4 = OrderedDict()
comm_mem5 = OrderedDict()
for comm in comm_mem:
#comm_mem2[comm] = np.mean(np.array(comm_mem[comm]))
comm_mem2[comm] = stats.mode(np.array(comm_mem[comm]))[0][0]
comm_mem3[comm] = np.std(np.array(comm_mem[comm]))
comm_mem4[comm] = float(len(np.array(comm_mem[comm]))) / float(N)
#n = full_comm_mem[comm]
#comm_mem4[comm] = float(len(np.array(comm_mem[comm]))) / float((n*(n-1)/2)) if n > 0 else 0
comm_mem5[comm] = float(len(np.array(comm_mem_weak[comm]))) / float(N_weak)
return comm_mem2, comm_mem3 , comm_mem4, comm_mem5
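# Reading of the four dicts returned above (an interpretation of the code, not
# original documentation): keys are the number of communities an edge's two
# endpoints share; comm_mem2 holds the modal tie strength, comm_mem3 its
# standard deviation, comm_mem4 the fraction of strong (mutual) edges and
# comm_mem5 the fraction of weak (directed) edges falling into that bucket.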
#find_node_interaction_comm_propensity()
def plot_interaction_comm_propensity():
cm1, cm2, cm3, cm4 = find_node_interaction_comm_propensity()
x = cm3.keys()
y = cm3.values()
#e = cm2.values()
#x = cm3.keys()
z = cm4.values()
print y
print z
plt.plot(x, y, c='r')
plt.plot(x, z, c='b')
#plt.errorbar(x, y, e)
#plt.xlim(-1,12)
#plt.yscale('log')
print pearsonr(np.array(x), np.array(y))
print pearsonr(np.array(x), np.array(z))
plt.show()
# nodes with highest avg COMM membership
def find_overlapping_COMM():
COMM = 0
nodes_num_COMM = find_nodes_in_more_COMM()
COMM_density = defaultdict(list)
input_file = codecs.open(F_IN, 'r', encoding='utf8')
for line in input_file:
line = line.split()
for node in line:
COMM_density[COMM].append(nodes_num_COMM[int(node)])
COMM += 1
COMM_density2 = defaultdict(tuple)
for COMM in COMM_density:
COMM_density2[COMM] = (np.mean(np.array(COMM_density[COMM])), len(COMM_density[COMM]))
COMM_density3 = OrderedDict(sorted(COMM_density2.items(), key=lambda t:t[1][0]))
#for COMM in COMM_density3:
# print COMM, COMM_density3[COMM]
return COMM_density3
# nodes with highest avg COMM membership
def find_overlapping_COMM_MEDIAN_density():
COMM = 0
nodes_num_COMM = find_nodes_in_more_COMM()
COMM_density = defaultdict(list)
input_file = codecs.open(F_IN, 'r', encoding='utf8')
for line in input_file:
line = line.split()
for node in line:
COMM_density[COMM].append(nodes_num_COMM[int(node)])
COMM += 1
COMM_density2 = defaultdict(tuple)
for COMM in COMM_density:
COMM_density2[COMM] = (np.median(np.array(COMM_density[COMM])), len(COMM_density[COMM]))
COMM_density3 = OrderedDict(sorted(COMM_density2.items(), key=lambda t:t[1][0]))
#for COMM in COMM_density3:
# print COMM, COMM_density3[COMM]
return COMM_density3
def plot_COMM_size_vs_MEDIAN_density():
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
x = []
y = []
data = find_overlapping_COMM_MEDIAN_density()
for COMM in data:
x.append(data[COMM][0])
y.append(data[COMM][1])
x = np.array(x)
y = np.array(y)
print 'Corrcoef COMM size and density ', pearsonr(np.array(x), np.array(y))
(r, p) = pearsonr(np.array(x), np.array(y))
lab = r'$r=' + "{:.2f}".format(r) + '$, $p= ' + "{:.2f}".format(p) + '$'
xlabel = 'comm density'
ylabel = 'comm size'
#plt.scatter(x, y, edgecolors='none', c='c', label=lab)
sns.set_style("white")
g = sns.jointplot(x=x, y=y, kind='reg',annot_kws=dict(stat="r"), \
joint_kws={'line_kws':{'color':'gray', 'alpha':0.3, 'markeredgewidth':0}}).set_axis_labels(xlabel, ylabel)
regline = g.ax_joint.get_lines()[0]
regline.set_color('c')
	regline.set_zorder(5)
labelsx = ['0','','1','', '2','', '3','','4']
g.ax_joint.set_xticklabels(labelsx)
#plt.legend(frameon=0, loc=2)
#plt.show()
#plt.tight_layout()
plt.savefig('node_comm_size_MEDIAN_density77.pdf',bbox_inches='tight' , dpi=550)
plt.show()
def plot_COMM_size_vs_density():
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
x = []
y = []
data = find_overlapping_COMM()
for COMM in data:
x.append(data[COMM][0])
y.append(data[COMM][1])
x = np.array(x)
y = np.array(y)
print 'Corrcoef COMM size and density ', pearsonr(np.array(x), np.array(y))
(r, p) = pearsonr(np.array(x), np.array(y))
lab = r'$r=' + "{:.2f}".format(r) + '$, $p= ' + "{:.2f}".format(p) + '$'
xlabel = 'comm density'
ylabel = 'comm size'
#plt.scatter(x, y, edgecolors='none', c='c', label=lab)
sns.set_style("white")
g = sns.jointplot(x=x, y=y, kind='reg',annot_kws=dict(stat="r"), \
joint_kws={'line_kws':{'color':'gray', 'alpha':0.3, 'markeredgewidth':0}}).set_axis_labels(xlabel, ylabel)
regline = g.ax_joint.get_lines()[0]
regline.set_color('c')
	regline.set_zorder(5)
labelsx = ['0','','1','', '2','', '3','','4']
g.ax_joint.set_xticklabels(labelsx)
#plt.legend(frameon=0, loc=2)
#plt.show()
#plt.tight_layout()
plt.savefig('node_comm_size_density777.pdf',bbox_inches='tight' , dpi=550)
plt.show()
#########################
# read from a file that is an edge list with weights
#########################
def read_in_graph():
G = Graph.Read_Ncol(f_in_graph, directed=True, weights=True)
print f_in_graph
print G.summary()
return G
#########################
# read from a file that is an edge list with SR weights
#########################
def read_in_SR_graph():
G = Graph.Read_Ncol('undirected_mention_graph_with_SR.csv', directed=False, weights=True)
print G.summary()
return G
def find_avg_neighborhood_SR_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
G = read_in_SR_graph()
res = defaultdict(list)
for node in node_comm_membership:
n = G.vs.select(name = str(node))
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
total_SR = G.strength(n[0].index, weights='weight')
total_neighbors = G.degree(n[0].index)
meanSR = total_SR / float(total_neighbors)
res[nCOMM].append(meanSR)
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.mean(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def read_sem_capital(f_name='user_entities.tab', tname='entities'):
f = open(f_name, "r")
cap = defaultdict(int)
cnt = 0
for line in f:
if tname == 'sentiment':
(vid, vn, val) = line.split('\t')
val = float(val)
else:
(vid, val) = line.split('\t')
val = float(val)
cap[vid] = val
cnt += 1
return cap
def find_avg_ST_INC_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
sem_cap = read_sem_capital(f_name='status_inconsistency', tname='status_inconsistency')
res = defaultdict(list)
for node in node_comm_membership:
n_sem = sem_cap[str(node)]
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
res[nCOMM].append(n_sem)
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.mean(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_MEDIAN_ST_INC_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
sem_cap = read_sem_capital(f_name='status_inconsistency', tname='status_inconsistency')
res = defaultdict(list)
for node in node_comm_membership:
n_sem = sem_cap[str(node)]
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
res[nCOMM].append(abs(n_sem))
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.median(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_avg_SEM_cap_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
sem_cap = read_sem_capital()
res = defaultdict(list)
for node in node_comm_membership:
n_sem = sem_cap[str(node)]
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
res[nCOMM].append(n_sem)
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.mean(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_MEDIAN_SEM_cap_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
sem_cap = read_sem_capital()
res = defaultdict(list)
for node in node_comm_membership:
n_sem = sem_cap[str(node)]
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
res[nCOMM].append(n_sem)
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.median(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_avg_sentiment_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
sem_cap = read_sem_capital(f_name='user_sentiment.tab', tname='sentiment')
res = defaultdict(list)
for node in node_comm_membership:
n_sem = sem_cap[str(node)]
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 35 else 35
res[nCOMM].append(n_sem)
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.median(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_avg_deg_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
G = read_in_graph()
G_undir = G.copy()
# this copy is then transformed into undir weighted mutual
G_undir.to_undirected(mode="mutual", combine_edges='sum')
res = defaultdict(list)
for node in node_comm_membership:
n = G_undir.vs.select(name = str(node))
nCOMM = node_comm_membership[node] #if node_comm_membership[node] < 10 else 10
try:
res[nCOMM].append(G_undir.degree(n[0].index))
except IndexError:
print 'Index Error'
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.mean(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_avg_WEIGHTED_deg_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
G = read_in_graph()
G_undir = G.copy()
# this copy is then transformed into undir weighted mutual
G_undir.to_undirected(mode="mutual", combine_edges='min')
res = defaultdict(list)
for node in node_comm_membership:
n = G_undir.vs.select(name = str(node))
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 40 else 40
try:
res[nCOMM].append(G_undir.degree(n[0].index))
except IndexError:
print 'IndexError'
res_mean = defaultdict(float)
res_stdev = defaultdict(float)
for COMM in res:
res_mean[COMM] = np.mean(np.array(res[COMM]))
res_stdev[COMM] = np.std(np.array(res[COMM]))
return res_mean, res_stdev
def find_avg_DIR_deg_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
G = read_in_graph()
res_IN = defaultdict(list)
res_OUT = defaultdict(list)
cnt_IE = 0
for node in node_comm_membership:
try:
n = G.vs.select(name = str(node))
IN_deg = G.strength(n[0].index, weights='weight', mode=IN)
OUT_deg = G.strength(n[0].index, weights='weight', mode=OUT)
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 40 else 40
res_IN[nCOMM].append(IN_deg)
res_OUT[nCOMM].append(OUT_deg)
except IndexError:
print 'IndexError'
cnt_IE += 1
print 'Skipped nodes ', cnt_IE
res_IN_mean = defaultdict(float)
res_IN_std = defaultdict(float)
res_OUT_mean = defaultdict(float)
res_OUT_std = defaultdict(float)
for COMM in res_IN:
res_IN_mean[COMM] = np.mean(np.array(res_IN[COMM]))
res_OUT_mean[COMM] = np.mean(np.array(res_OUT[COMM]))
res_IN_std[COMM] = np.std(np.array(res_IN[COMM]))
res_OUT_std[COMM] = np.std(np.array(res_OUT[COMM]))
return res_IN_mean, res_IN_std, res_OUT_mean, res_OUT_std
def find_median_DIR_deg_per_node_COMM_membership():
node_comm_membership = find_nodes_in_more_COMM()
G = read_in_graph()
res_IN = defaultdict(list)
res_OUT = defaultdict(list)
for node in node_comm_membership:
n = G.vs.select(name = str(node))
IN_deg = G.strength(n[0].index, weights='weight', mode=IN)
OUT_deg = G.strength(n[0].index, weights='weight', mode=OUT)
nCOMM = node_comm_membership[node] if node_comm_membership[node] < 10 else 10
res_IN[nCOMM].append(IN_deg)
res_OUT[nCOMM].append(OUT_deg)
res_IN_m = defaultdict(float)
res_IN_std = defaultdict(float)
res_OUT_m = defaultdict(float)
res_OUT_std = defaultdict(float)
for COMM in res_IN:
res_IN_m[COMM] = np.median(np.array(res_IN[COMM]))
res_OUT_m[COMM] = np.median(np.array(res_OUT[COMM]))
res_IN_std[COMM] = np.std(np.array(res_IN[COMM]))
res_OUT_std[COMM] = np.std(np.array(res_OUT[COMM]))
return res_IN_m, res_IN_std, res_OUT_m, res_OUT_std
def calculate_pdf(ydata, logscale=True):
x = np.array(ydata)
mu = np.mean(x)
sigma = np.std(x)
print '$\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
lab = ' $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
num_bins = 10
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins)
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x = np.ravel(zip(bins[:-1], bins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
return x, y, lab
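# Minimal usage sketch for calculate_pdf (kept as a comment so nothing extra
# runs when the script executes; the sample values below are made up):
#
#   xs, ys, label = calculate_pdf([1, 1, 2, 3, 5, 8])
#   plt.plot(xs, ys, label=label)   # step-style empirical pdf of the sample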
def plot_pdf_node_in_COMM():
fig7s = plt.gcf()
fig7s.set_size_inches((8,6))
node_in_COMM = find_nodes_in_more_COMM()
ydata = node_in_COMM.values()
x, y,lab = calculate_pdf(ydata)
plt.plot(x,y,linestyle="-",color='red',label=lab)
plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('p(node comm membership)')
plt.legend(loc='best', frameon=0)
plt.xlim(1,10)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
plt.tight_layout()
plt.savefig('node_comm_membership_pdf777.eps',bbox_inches='tight' , dpi=550)
def plot_deg_vs_COMM_membership():
d, std = find_avg_deg_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
plt.errorbar(x,y,e,linestyle="-",marker='*',color='red',label='mean contacts per comm membership ')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node contacts')
plt.legend(loc='best',frameon=False)
plt.xlim(0,11)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
plt.savefig('node_comm_membership_vs_avg_deg4.eps', dpi=550)
plt.show()
def plot_WEIGHTED_deg_vs_COMM_membership():
d, std = find_avg_WEIGHTED_deg_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
fig7s = plt.gcf()
fig7s.set_size_inches((8,6))
sns.set_style('white')
print 'Corrcoef strong commun int and comm membership ', pearsonr(np.array(x), np.array(y))
plt.errorbar(x,y,e,linestyle="-",marker='*',color='maroon',label='mean strong communication intensity')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean strong communication intensity')
plt.legend(loc='best',frameon=False)
plt.xlim(0,11)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
plt.grid(True)
plt.savefig('node_comm_membership_vs_mean_strong_comm_intensity_40.eps', dpi=550)
plt.show()
def plot_avg_neighborhood_SR_vs_COMM_membership():
plt.rcParams['figure.figsize']=(6,6)
fig7s = plt.gcf()
fig7s.set_size_inches((6,6))
d, std = find_avg_neighborhood_SR_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
print 'Corrcoef SR and comm membership ', pearsonr(np.array(x), np.array(y))
plt.errorbar(x,y,e,linestyle="-",marker='*',color='darkgreen',fmt='o',elinewidth=3.4)
#,label='mean node neighborhood SR per comm membership ')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node SR with neighbors')
#plt.legend(loc='best',frameon=False)
plt.xlim(0,11)
plt.tight_layout()
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.savefig('node_comm_membership_vs_mean_SR77.eps', bbox_inches='tight' ,dpi=550)
plt.show()
def plot_SEM_CAP_vs_COMM_membership():
d, std = find_avg_SEM_cap_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
plt.rcParams['figure.figsize']=(6,6)
fig7s = plt.gcf()
fig7s.set_size_inches((6,6))
plt.errorbar(x,y,e,linestyle="-",marker='*',color='darkred',fmt='o',elinewidth=3.4)
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node semantic capital')
plt.legend(loc=2,frameon=False)
plt.xlim(0,11)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.tight_layout()
plt.savefig('node_comm_membership_vs_avg_SEM_CAP77.eps', bbox_inches='tight', dpi=550)
plt.show()
def plot_MEDIAN_SEM_CAP_vs_COMM_membership():
d, std = find_MEDIAN_SEM_cap_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
plt.errorbar(x,y,e,linestyle="-",marker='*',color='darkred',label='mean sem capital per comm membership ')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node semantic capital')
plt.legend(loc=2,frameon=False)
plt.xlim(0,11)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.savefig('node_comm_membership_vs_MEDIAN_SEM_CAP1.eps', dpi=550)
plt.show()
def plot_ST_INC_vs_COMM_membership():
d, std = find_avg_ST_INC_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
print 'Corrcoef ST INC and comm membership ', pearsonr(np.array(x), np.array(y))
plt.errorbar(x,y,e,linestyle="-",marker='*',color='darkcyan',label='mean status inconsistency per comm membership ')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node status inconsistency')
plt.legend(loc=2,frameon=False)
plt.xlim(0,11)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.savefig('node_comm_membership_vs_mean_ST_INC2.eps', dpi=550)
plt.show()
def plot_MEDIAN_ST_INC_vs_COMM_membership():
d, std = find_avg_ST_INC_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
plt.rcParams['figure.figsize']=(6,6)
fig7s = plt.gcf()
fig7s.set_size_inches((6,6))
print 'Corrcoef MEDIAN ST INC and comm membership ', pearsonr(np.array(x), np.array(y))
plt.errorbar(x,y,e,linestyle="-",marker='*',fmt='o',elinewidth=3.4,color='darkcyan')
plt.xlabel('node comm membership')
plt.ylabel(r'mean node $st_{inc}$')
plt.legend(loc=2,frameon=False)
plt.xlim(0,11)
plt.tight_layout()
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.savefig('node_comm_membership_vs_MEDIAN_ST_INC77.eps', bbox_inches='tight' , dpi=550)
plt.show()
def plot_sentiment_vs_COMM_membership():
d, std = find_avg_sentiment_per_node_COMM_membership()
x = d.keys()
y = d.values()
e = std.values()
print 'Corrcoef sentiment and comm membership ', pearsonr(np.array(x), np.array(y))
plt.errorbar(x,y,e,linestyle="-",marker='*',color='darkcyan',label='mean sentiment per comm membership ')
#plt.yscale('log', nonposy='clip')
plt.xlabel('node comm membership')
plt.ylabel('mean node sentiment')
#plt.legend(loc=2,frameon=False)
#plt.xlim(0,11)
#plt.ylim(-0.1,0.1)
#plt.title(r'Histogram for mention network pairwise SR: $\mu=' + "{:.3f}".format(mu) + '$, $\sigma= ' + "{:.3f}".format(sigma) + '$')
#plt.grid(True)
plt.savefig('node_comm_membership_vs_avg_sentiment_35.eps', dpi=550)
plt.show()
def plot_DIR_deg_vs_COMM_membership():
plt.rcParams['figure.figsize']=(6,6)
fig7s = plt.gcf()
fig7s.set_size_inches((6,6))
rIN_deg, rIN_std, rOUT_deg, rOUT_std = find_avg_DIR_deg_per_node_COMM_membership()
x1 = np.array(rIN_deg.keys())
y1 = np.array(rIN_deg.values())
e1 = np.array(rIN_std.values())
print 'Corrcoef weighted INdeg and comm membership ', pearsonr(np.array(x1), np.array(y1))
x2 = np.array(rOUT_deg.keys())
y2 = np.array(rOUT_deg.values())
e2 = np.array(rOUT_std.values())
print 'Corrcoef weighted OUTdeg and comm membership ', pearsonr(np.array(x2), np.array(y2))
plt.errorbar(x1-0.07,y1,e1,linestyle="-",marker='*',fmt='o',elinewidth=3.4,color='darkred',label='popularity')
plt.errorbar(x2+0.07,y2,e2,linestyle="-",marker='*',fmt='o',elinewidth=3.4,color='darkblue',label='activity')
plt.xlabel('node comm membership')
plt.ylabel('mean node social capital')
plt.legend(loc='best',frameon=False)
#plt.xlim(0,11)
plt.tight_layout()
#plt.grid(True)
plt.savefig('node_comm_membership_vs_avg_DIR_deg_40.eps', bbox_inches='tight' , dpi=550)
plt.show()
def plot_MEDIAN_DIR_deg_vs_COMM_membership():
rIN_deg, rIN_std, rOUT_deg, rOUT_std = find_median_DIR_deg_per_node_COMM_membership()
x1 = rIN_deg.keys()
y1 = rIN_deg.values()
e1 = rIN_std.values()
x2 = rOUT_deg.keys()
y2 = rOUT_deg.values()
e2 = rOUT_std.values()
plt.errorbar(x1,y1,e1,linestyle="-",marker='*',color='darkred',label='median popularity per comm membership ')
plt.errorbar(x2,y2,e2,linestyle="-",marker='*',color='darkblue',label='median activity per comm membership ')
plt.xlabel('node comm membership')
plt.ylabel('median node social capital')
plt.legend(loc='best',frameon=False)
plt.xlim(0,11)
#plt.grid(True)
plt.savefig('node_comm_membership_vs_DIR_deg_40.eps', dpi=550)
plt.show()
#plot_DIR_deg_vs_COMM_membership()
#plot_WEIGHTED_deg_vs_COMM_membership()
#plot_MEDIAN_ST_INC_vs_COMM_membership()
#plot_sentiment_vs_COMM_membership()
#plot_avg_neighborhood_SR_vs_COMM_membership()
#plot_SEM_CAP_vs_COMM_membership()
#plot_DIR_deg_vs_COMM_membership()
plot_DIR_deg_vs_COMM_membership()
#plot_deg_vs_COMM_membership()
#output_basic_stats_COMM()
#find_nodes_in_more_COMM()
#plot_pdf_node_in_COMM()
#plot_MEDIAN_SEM_CAP_vs_COMM_membership()
#plot_COMM_size_vs_density()
#plot_COMM_size_vs_MEDIAN_density()
| mit |
kmather73/pymc3 | pymc3/model.py | 5 | 16723 | from .vartypes import *
from theano import theano, tensor as t, function
from theano.tensor.var import TensorVariable
import numpy as np
from functools import wraps
from .theanof import *
from inspect import getargspec
from .memoize import memoize
__all__ = ['Model', 'Factor', 'compilef', 'fn', 'fastfn', 'modelcontext', 'Point', 'Deterministic', 'Potential']
class Context(object):
"""Functionality for objects that put themselves in a context using the `with` statement."""
def __enter__(self):
type(self).get_contexts().append(self)
return self
def __exit__(self, typ, value, traceback):
type(self).get_contexts().pop()
@classmethod
def get_contexts(cls):
if not hasattr(cls, "contexts"):
cls.contexts = []
return cls.contexts
@classmethod
def get_context(cls):
"""Return the deepest context on the stack."""
try:
return cls.get_contexts()[-1]
except IndexError:
raise TypeError("No context on context stack")
def modelcontext(model):
"""return the given model or try to find it in the context if there was none supplied."""
if model is None:
return Model.get_context()
return model
class Factor(object):
"""Common functionality for objects with a log probability density associated with them."""
@property
def logp(self):
"""Compiled log probability density function"""
return self.model.fn(self.logpt)
@property
def logp_elemwise(self):
return self.model.fn(self.logp_elemwiset)
def dlogp(self, vars=None):
"""Compiled log probability density gradient function"""
return self.model.fn(gradient(self.logpt, vars))
def d2logp(self, vars=None):
"""Compiled log probability density hessian function"""
return self.model.fn(hessian(self.logpt, vars))
@property
def fastlogp(self):
"""Compiled log probability density function"""
return self.model.fastfn(self.logpt)
def fastdlogp(self, vars=None):
"""Compiled log probability density gradient function"""
return self.model.fastfn(gradient(self.logpt, vars))
def fastd2logp(self, vars=None):
"""Compiled log probability density hessian function"""
return self.model.fastfn(hessian(self.logpt, vars))
@property
def logpt(self):
"""Theano scalar of log-probability of the model"""
return t.sum(self.logp_elemwiset)
class Model(Context, Factor):
"""Encapsulates the variables and likelihood factors of a model."""
def __init__(self):
self.named_vars = {}
self.free_RVs = []
self.observed_RVs = []
self.deterministics = []
self.potentials = []
self.missing_values = []
self.model = self
@property
@memoize
def logpt(self):
"""Theano scalar of log-probability of the model"""
factors = [var.logpt for var in self.basic_RVs] + self.potentials
return t.add(*map(t.sum, factors))
@property
def vars(self):
"""List of unobserved random variables used as inputs to the model (which excludes deterministics)."""
return self.free_RVs
@property
def basic_RVs(self):
"""List of random variables the model is defined in terms of (which excludes deterministics)."""
return (self.free_RVs + self.observed_RVs)
@property
def unobserved_RVs(self):
"""List of all random variable, including deterministic ones."""
return self.vars + self.deterministics
@property
def test_point(self):
"""Test point used to check that the model doesn't generate errors"""
return Point(((var, var.tag.test_value) for var in self.vars),
model=self)
@property
def disc_vars(self):
"""All the discrete variables in the model"""
return list(typefilter(self.vars, discrete_types))
@property
def cont_vars(self):
"""All the continuous variables in the model"""
return list(typefilter(self.vars, continuous_types))
def Var(self, name, dist, data=None):
"""Create and add (un)observed random variable to the model with an appropriate prior distribution.
Parameters
----------
name : str
dist : distribution for the random variable
data : arraylike (optional)
if data is provided, the variable is observed. If None, the variable is unobserved.
Returns
-------
FreeRV or ObservedRV
"""
if data is None:
if getattr(dist, "transform", None) is None:
var = FreeRV(name=name, distribution=dist, model=self)
self.free_RVs.append(var)
else:
var = TransformedRV(name=name, distribution=dist, model=self, transform=dist.transform)
self.deterministics.append(var)
return var
elif isinstance(data, dict):
var = MultiObservedRV(name=name, data=data, distribution=dist, model=self)
self.observed_RVs.append(var)
if var.missing_values:
self.free_RVs += var.missing_values
self.missing_values += var.missing_values
for v in var.missing_values:
self.named_vars[v.name] = v
else:
var = ObservedRV(name=name, data=data, distribution=dist, model=self)
self.observed_RVs.append(var)
if var.missing_values:
self.free_RVs.append(var.missing_values)
self.missing_values.append(var.missing_values)
self.named_vars[var.missing_values.name] = var.missing_values
self.add_random_variable(var)
return var
def add_random_variable(self, var):
"""Add a random variable to the named variables of the model."""
self.named_vars[var.name] = var
if not hasattr(self, var.name):
setattr(self, var.name, var)
def __getitem__(self, key):
return self.named_vars[key]
@memoize
def makefn(self, outs, mode=None, *args, **kwargs):
"""Compiles a Theano function which returns `outs` and takes the variable
ancestors of `outs` as inputs.
Parameters
----------
outs : Theano variable or iterable of Theano variables
mode : Theano compilation mode
Returns
-------
Compiled Theano function"""
return function(self.vars, outs,
allow_input_downcast=True,
on_unused_input='ignore',
accept_inplace=True,
mode=mode, *args, **kwargs)
def fn(self, outs, mode=None, *args, **kwargs):
"""Compiles a Theano function which returns the values of `outs` and takes values of model
vars as arguments.
Parameters
----------
outs : Theano variable or iterable of Theano variables
mode : Theano compilation mode
Returns
-------
Compiled Theano function"""
return LoosePointFunc(self.makefn(outs, mode, *args, **kwargs), self)
def fastfn(self, outs, mode=None, *args, **kwargs):
"""Compiles a Theano function which returns `outs` and takes values of model
vars as a dict as an argument.
Parameters
----------
outs : Theano variable or iterable of Theano variables
mode : Theano compilation mode
Returns
-------
Compiled Theano function as point function."""
f = self.makefn(outs, mode, *args, **kwargs)
return FastPointFunc(f)
def profile(self, outs, n=1000, point=None, profile=True, *args, **kwargs):
"""Compiles and profiles a Theano function which returns `outs` and takes values of model
vars as a dict as an argument.
Parameters
----------
outs : Theano variable or iterable of Theano variables
n : int, default 1000
Number of iterations to run
point : point
Point to pass to the function
profile : True or ProfileStats
*args, **kwargs
Compilation args
Returns
-------
ProfileStats
Use .summary() to print stats."""
f = self.makefn(outs, profile=profile, *args, **kwargs)
if point is None:
point = self.test_point
for i in range(n):
f(**point)
return f.profile
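# Minimal usage sketch for Model (comment only; ``Normal`` and the variable
# names are assumptions and would come from pymc3.distributions, they are not
# defined in this module):
#
#   with Model() as model:
#       mu = model.Var('mu', Normal.dist(mu=0., sd=1.))                # free RV
#       y = model.Var('y', Normal.dist(mu=mu, sd=1.), data=observed)   # observed RV
#       lp = model.logp(model.test_point)                              # scalar log-density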
def fn(outs, mode=None, model=None, *args, **kwargs):
"""Compiles a Theano function which returns the values of `outs` and takes values of model
vars as arguments.
Parameters
----------
outs : Theano variable or iterable of Theano variables
mode : Theano compilation mode
Returns
-------
Compiled Theano function"""
model = modelcontext(model)
return model.fn(outs,mode, *args, **kwargs)
def fastfn(outs, mode=None, model=None):
"""Compiles a Theano function which returns `outs` and takes values of model
vars as a dict as an argument.
Parameters
----------
outs : Theano variable or iterable of Theano variables
mode : Theano compilation mode
Returns
-------
Compiled Theano function as point function."""
model = modelcontext(model)
return model.fastfn(outs,mode)
def Point(*args, **kwargs):
"""Build a point. Uses same args as dict() does.
Filters out variables not in the model. All keys are strings.
Parameters
----------
*args, **kwargs
arguments to build a dict"""
model = modelcontext(kwargs.pop('model', None))
args = [a for a in args]
try:
d = dict(*args, **kwargs)
except Exception as e:
raise TypeError(
"can't turn " + str(args) + " and " + str(kwargs) +
" into a dict. " + str(e))
varnames = list(map(str, model.vars))
return dict((str(k), np.array(v))
for (k, v) in d.items()
if str(k) in varnames)
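# Illustrative sketch (comment only): Point filters the supplied mapping down
# to the model's free variables, so with free variables 'mu' and 'sigma_log'
#
#   start = Point({'mu': 0., 'sigma_log': 1., 'not_a_var': 5.}, model=model)
#
# would drop 'not_a_var' and return numpy arrays keyed by variable name.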
class FastPointFunc(object):
"""Wraps so a function so it takes a dict of arguments instead of arguments."""
def __init__(self, f):
self.f = f
def __call__(self, state):
return self.f(**state)
class LoosePointFunc(object):
"""Wraps so a function so it takes a dict of arguments instead of arguments
but can still take arguments."""
def __init__(self, f, model):
self.f = f
self.model = model
def __call__(self, *args, **kwargs):
point = Point(model=self.model, *args, **kwargs)
return self.f(**point)
compilef = fastfn
class FreeRV(Factor, TensorVariable):
"""Unobserved random variable that a model is specified in terms of."""
def __init__(self, type=None, owner=None, index=None, name=None, distribution=None, model=None):
"""
Parameters
----------
type : theano type (optional)
owner : theano owner (optional)
name : str
distribution : Distribution
model : Model"""
if type is None:
type = distribution.type
super(FreeRV, self).__init__(type, owner, index, name)
if distribution is not None:
self.dshape = tuple(distribution.shape)
self.dsize = int(np.prod(distribution.shape))
self.distribution = distribution
self.tag.test_value = np.ones(
distribution.shape, distribution.dtype) * distribution.default()
self.logp_elemwiset = distribution.logp(self)
self.model = model
def pandas_to_array(data):
if hasattr(data, 'values'): #pandas
if data.isnull().any().any(): #missing values
return np.ma.MaskedArray(data.values, data.isnull().values)
else:
return data.values
elif hasattr(data, 'mask'):
return data
elif isinstance(data, theano.gof.graph.Variable):
return data
else:
return np.asarray(data)
def as_tensor(data, name,model, dtype):
data = pandas_to_array(data).astype(dtype)
if hasattr(data, 'mask'):
from .distributions import NoDistribution
fakedist = NoDistribution.dist(shape=data.mask.sum(), dtype=dtype, testval=data.mean().astype(dtype))
missing_values = FreeRV(name=name + '_missing', distribution=fakedist, model=model)
constant = t.as_tensor_variable(data.filled())
dataTensor = theano.tensor.set_subtensor(constant[data.mask.nonzero()], missing_values)
dataTensor.missing_values = missing_values
return dataTensor
else:
data = t.as_tensor_variable(data, name=name)
data.missing_values = None
return data
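# Illustrative note on the missing-value handling above (a reading of the code):
# passing a masked array such as
#
#   data = np.ma.MaskedArray([1., 2., -1.], mask=[False, False, True])
#
# makes as_tensor create a FreeRV named '<name>_missing' for the masked entry
# and splice it into the returned tensor with set_subtensor, so the missing
# value is imputed as part of the model.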
class ObservedRV(Factor, TensorVariable):
"""Observed random variable that a model is specified in terms of.
Potentially partially observed.
"""
def __init__(self, type=None, owner=None, index=None, name=None, data=None, distribution=None, model=None):
"""
Parameters
----------
type : theano type (optional)
owner : theano owner (optional)
name : str
distribution : Distribution
model : Model
"""
from .distributions import TensorType
if type is None:
data = pandas_to_array(data)
type = TensorType(distribution.dtype, data.shape)
super(TensorVariable, self).__init__(type, None, None, name)
if distribution is not None:
data = as_tensor(data, name,model,distribution.dtype)
self.missing_values = data.missing_values
self.logp_elemwiset = distribution.logp(data)
self.model = model
self.distribution = distribution
#make this RV a view on the combined missing/nonmissing array
theano.gof.Apply(theano.compile.view_op, inputs=[data], outputs=[self])
self.tag.test_value = theano.compile.view_op(data).tag.test_value
class MultiObservedRV(Factor):
"""Observed random variable that a model is specified in terms of.
Potentially partially observed.
"""
def __init__(self, name, data, distribution, model):
"""
Parameters
----------
type : theano type (optional)
owner : theano owner (optional)
name : str
distribution : Distribution
model : Model
"""
self.name = name
self.data = { name : as_tensor(data, name, model, distribution.dtype) for name, data in data.items()}
self.missing_values = [ data.missing_values for data in self.data.values() if data.missing_values is not None]
self.logp_elemwiset = distribution.logp(**self.data)
self.model = model
self.distribution = distribution
def Deterministic(name, var, model=None):
"""Create a named deterministic variable
Parameters
----------
name : str
var : theano variables
Returns
-------
n : var but with name name"""
var.name = name
modelcontext(model).deterministics.append(var)
modelcontext(model).add_random_variable(var)
return var
def Potential(name, var, model=None):
"""Add an arbitrary factor potential to the model likelihood
Parameters
----------
name : str
var : theano variables
Returns
-------
var : var, with name attribute
"""
var.name = name
modelcontext(model).potentials.append(var)
return var
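# Illustrative sketch for Deterministic and Potential (comment only; ``Normal``
# and the variable names are assumptions):
#
#   with Model() as model:
#       mu = model.Var('mu', Normal.dist(mu=0., sd=1.))
#       twice_mu = Deterministic('twice_mu', 2 * mu)   # tracked, not sampled directly
#       Potential('soft_constraint', -abs(mu))         # extra term added to the model logp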
class TransformedRV(TensorVariable):
def __init__(self, type=None, owner=None, index=None, name=None, distribution=None, model=None, transform=None):
"""
Parameters
----------
type : theano type (optional)
owner : theano owner (optional)
name : str
distribution : Distribution
model : Model"""
if type is None:
type = distribution.type
super(TransformedRV, self).__init__(type, owner, index, name)
if distribution is not None:
self.model = model
self.transformed = model.Var(name + "_" + transform.name, transform.apply(distribution))
normalRV = transform.backward(self.transformed)
theano.Apply(theano.compile.view_op, inputs=[normalRV], outputs=[self])
self.tag.test_value = normalRV.tag.test_value
def as_iterargs(data):
if isinstance(data, tuple):
return data
else:
return [data]
# theano stuff
theano.config.warn.sum_div_dimshuffle_bug = False
theano.config.compute_test_value = 'raise'
| apache-2.0 |
rabipanda/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
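# Note: with num_epochs=None (the default above) the input_fn cycles through the
# DataFrame indefinitely, which suits training; the evaluate/predict calls below
# pass num_epochs=1 and shuffle=False so every row is seen exactly once, in order.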
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
lxybox1/MissionPlanner | Lib/site-packages/numpy/lib/twodim_base.py | 70 | 23431 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
    Equivalent to ``A[:,::-1]``. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0,1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0,1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
    tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k >= 0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, int)), m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None:
N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N - 1):
X[:,i] = x**(N - i - 1)
return X
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
    where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_j`
    is the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
    We can now use Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent, interpolation='nearest')
<matplotlib.image.AxesImage object at ...>
>>> plt.colorbar()
<matplotlib.colorbar.Colorbar instance at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
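# Editor's note: a small, hedged illustration (not part of the original module)
# of the axis convention described in the Notes of `histogram2d`: `x` is binned
# along the first axis of `H` and `y` along the second.
def _histogram2d_axis_convention_demo():
    import numpy as np
    x = np.array([0.1, 0.2, 0.3, 0.9])   # three small values, one large
    y = np.array([0.1, 0.1, 0.1, 0.1])   # all small
    H, xedges, yedges = np.histogram2d(x, y, bins=2, range=[[0, 1], [0, 1]])
    # Rows of H correspond to x-bins: here H[0].sum() == 3 and H[1].sum() == 1.
    return H, xedges, yedges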
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
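# Editor's note: a brief, hedged usage sketch (not part of the original module)
# for `tril_indices_from`, whose docstring above carries no Examples section.
def _tril_indices_from_demo():
    from numpy import arange
    a = arange(16).reshape(4, 4)
    rows, cols = tril_indices_from(a)
    # a[rows, cols] selects the lower triangle: [0, 4, 5, 8, 9, 10, 12, 13, 14, 15]
    return a[rows, cols]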
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
    Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
    return triu_indices(arr.shape[0], k)
| gpl-3.0 |
rgommers/scipy | doc/source/tutorial/stats/plots/qmc_plot_conv_mc_sobol.py | 12 | 2386 | """Integration convergence comparison: MC vs Sobol'.
The function is a synthetic example specifically designed
to verify the correctness of the implementation [2]_.
References
----------
.. [1] I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
1967.
.. [2] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
2020.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import qmc
n_conv = 99
ns_gen = 2 ** np.arange(4, 13) # 13
def art_2(sample):
    # dim 5, true value 5/3 + 5*(5 - 1)/4
return np.sum(sample, axis=1) ** 2
functions = namedtuple('functions', ['name', 'func', 'dim', 'ref'])
case = functions('Art 2', art_2, 5, 5 / 3 + 5 * (5 - 1) / 4)
def conv_method(sampler, func, n_samples, n_conv, ref):
samples = [sampler(n_samples) for _ in range(n_conv)]
samples = np.array(samples)
evals = [np.sum(func(sample)) / n_samples for sample in samples]
squared_errors = (ref - np.array(evals)) ** 2
rmse = (np.sum(squared_errors) / n_conv) ** 0.5
return rmse
# Analysis
sample_mc_rmse = []
sample_sobol_rmse = []
rng = np.random.default_rng()
for ns in ns_gen:
# Monte Carlo
sampler_mc = lambda x: rng.random((x, case.dim))
conv_res = conv_method(sampler_mc, case.func, ns, n_conv, case.ref)
sample_mc_rmse.append(conv_res)
# Sobol'
engine = qmc.Sobol(d=case.dim, scramble=False)
conv_res = conv_method(engine.random, case.func, ns, 1, case.ref)
sample_sobol_rmse.append(conv_res)
sample_mc_rmse = np.array(sample_mc_rmse)
sample_sobol_rmse = np.array(sample_sobol_rmse)
# Plot
fig, ax = plt.subplots(figsize=(4, 4))
ax.set_aspect('equal')
# MC
ratio = sample_mc_rmse[0] / ns_gen[0] ** (-1 / 2)
ax.plot(ns_gen, ns_gen ** (-1 / 2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_mc_rmse, label="MC")
# Sobol'
ratio = sample_sobol_rmse[0] / ns_gen[0] ** (-2/2)
ax.plot(ns_gen, ns_gen ** (-2/2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_sobol_rmse, label="Sobol' unscrambled")
ax.set_xlabel(r'$N_s$')
ax.set_xscale('log')
ax.set_xticks(ns_gen)
ax.set_xticklabels([fr'$2^{{{ns}}}$' for ns in np.arange(4, 13)])
ax.set_ylabel(r'$\log (\epsilon)$')
ax.set_yscale('log')
ax.legend(loc='upper right')
fig.tight_layout()
plt.show()
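# Editor's note (not part of the original script): a hedged follow-up that
# estimates the observed convergence rates from the computed RMSE values by a
# log-log fit; plain MC should come out close to -1/2 and unscrambled Sobol'
# closer to -1 for this smooth integrand.
rate_mc = np.polyfit(np.log(ns_gen), np.log(sample_mc_rmse), 1)[0]
rate_sobol = np.polyfit(np.log(ns_gen), np.log(sample_sobol_rmse), 1)[0]
print(f"Observed rates: MC {rate_mc:.2f}, Sobol' {rate_sobol:.2f}")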
| bsd-3-clause |
samzhang111/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
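# Editor's note: a hedged sanity check (not part of the original example) that
# the dictionary rows produced by `ricker_matrix` are L2-normalized, which is
# what `SparseCoder` expects of a precomputed dictionary.
def _check_dictionary_row_norms(width=10, resolution=128, n_components=16):
    D = ricker_matrix(width=width, resolution=resolution,
                      n_components=n_components)
    row_norms = np.sqrt(np.sum(D ** 2, axis=1))
    return D.shape, row_norms  # expect (16, 128) and norms of ~1.0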
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/units/artist_tests.py | 6 | 1475 | """
Test unit support with each of the matplotlib primitive artist types
The axes handles unit conversions and the artists keep a pointer to
their axes parent, so you must init the artists with the axes instance
if you want to initialize them with unit data, or else they will not
know how to convert the units to scalars
"""
import random
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.text as text
import matplotlib.collections as collections
from basic_units import cm, inch
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.xaxis.set_units(cm)
ax.yaxis.set_units(cm)
if 0:
# test a line collection
# Not supported at present.
verts = []
for i in range(10):
# a random line segment in inches
verts.append(zip(*inch*10*np.random.rand(2, random.randint(2,15))))
lc = collections.LineCollection(verts, axes=ax)
ax.add_collection(lc)
# test a plain-ol-line
line = lines.Line2D([0*cm, 1.5*cm], [0*cm, 2.5*cm], lw=2, color='black', axes=ax)
ax.add_line(line)
if 0:
# test a patch
# Not supported at present.
rect = patches.Rectangle( (1*cm, 1*cm), width=5*cm, height=2*cm, alpha=0.2, axes=ax)
ax.add_patch(rect)
t = text.Text(3*cm, 2.5*cm, 'text label', ha='left', va='bottom', axes=ax)
ax.add_artist(t)
ax.set_xlim(-1*cm, 10*cm)
ax.set_ylim(-1*cm, 10*cm)
#ax.xaxis.set_units(inch)
ax.grid(True)
ax.set_title("Artists with units")
plt.show()
| apache-2.0 |
hrjn/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X) | bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/stackplot.py | 2 | 1846 | """
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
"""Draws a stacked area plot.
*x* : 1d array of dimension N
    *y* : 2d array of dimension MxN, OR any number of 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
        stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4 are all 1xN
Keyword arguments:
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
"""
if len(args) == 1:
y = np.atleast_2d(*args)
elif len(args) > 1:
y = np.row_stack(args)
colors = kwargs.pop('colors', None)
if colors is not None:
axes.set_color_cycle(colors)
# Assume data passed has not been 'stacked', so stack it here.
y_stack = np.cumsum(y, axis=0)
r = []
# Color between x = 0 and the first array.
r.append(axes.fill_between(x, 0, y_stack[0, :],
facecolor=axes._get_lines.color_cycle.next(), **kwargs))
# Color between array i-1 and array i
for i in xrange(len(y) - 1):
r.append(axes.fill_between(x, y_stack[i, :], y_stack[i + 1, :],
facecolor=axes._get_lines.color_cycle.next(), **kwargs))
return r
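# Editor's note: a hedged usage sketch, not part of the original module. It
# assumes a Matplotlib Axes created via pyplot and calls the helper defined
# above directly; the pyplot-level stackplot wrapper is not used here.
def _stackplot_demo():
    import matplotlib.pyplot as plt
    x = np.arange(10)
    y1, y2, y3 = np.random.rand(3, 10)
    fig, ax = plt.subplots()
    # y1..y3 are unstacked 1xN series; stackplot cumulates and fills them.
    stackplot(ax, x, y1, y2, y3, colors=('red', 'green', 'blue'))
    return fig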
| mit |
yask123/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
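# Editor's note (not part of the original exercise): a hedged follow-up that
# reports the regularization value with the best mean cross-validation score.
best_idx = int(np.argmax(scores))
print("Best C: %.3g with CV score %.3f (+/- %.3f)"
      % (C_s[best_idx], scores[best_idx], scores_std[best_idx]))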
| bsd-3-clause |
mihaic/brainiak | brainiak/fcma/mvpa_voxelselector.py | 2 | 4426 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full Correlation Matrix Analysis (FCMA)
Activity-based voxel selection
"""
# Authors: Yida Wang
# (Intel Labs), 2017
import numpy as np
from sklearn import model_selection
import logging
from mpi4py import MPI
logger = logging.getLogger(__name__)
__all__ = [
"MVPAVoxelSelector",
]
def _sfn(data, mask, myrad, bcast_var):
"""Score classifier on searchlight data using cross-validation.
    The classifier is in `bcast_var[2]`. The labels are in `bcast_var[0]`. The
    number of cross-validation folds is in `bcast_var[1]`.
"""
clf = bcast_var[2]
masked_data = data[0][mask, :].T
# print(l[0].shape, mask.shape, data.shape)
skf = model_selection.StratifiedKFold(n_splits=bcast_var[1],
shuffle=False)
accuracy = np.mean(model_selection.cross_val_score(clf, masked_data,
y=bcast_var[0],
cv=skf,
n_jobs=1))
return accuracy
class MVPAVoxelSelector:
"""Activity-based voxel selection component of FCMA
Parameters
----------
data: 4D array in shape [brain 3D + epoch]
contains the averaged and normalized brain data epoch by epoch.
It is generated by .io.prepare_searchlight_mvpa_data
mask: 3D array
labels: 1D array
contains the labels of the epochs.
It is generated by .io.prepare_searchlight_mvpa_data
num_folds: int
the number of folds to be conducted in the cross validation
sl: Searchlight
the distributed Searchlight object
"""
def __init__(self,
data,
mask,
labels,
num_folds,
sl
):
self.data = data
self.mask = mask.astype(np.bool)
self.labels = labels
self.num_folds = num_folds
self.sl = sl
num_voxels = np.sum(self.mask)
if num_voxels == 0:
raise ValueError('Zero processed voxels')
def run(self, clf):
""" run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
logger.info(
'running activity-based voxel selection via Searchlight'
)
self.sl.distribute([self.data], self.mask)
self.sl.broadcast((self.labels, self.num_folds, clf))
if rank == 0:
logger.info(
'data preparation done'
)
# obtain a 3D array with accuracy numbers
result_volume = self.sl.run_searchlight(_sfn)
# get result tuple list from the volume
result_list = result_volume[self.mask]
results = []
if rank == 0:
for idx, value in enumerate(result_list):
if value is None:
value = 0
results.append((idx, value))
# Sort the voxels
results.sort(key=lambda tup: tup[1], reverse=True)
logger.info(
'activity-based voxel selection via Searchlight is done'
)
return result_volume, results
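# Editor's note: a hedged usage sketch, not part of this module. It assumes
# `data`, `mask` and `labels` come from io.prepare_searchlight_mvpa_data and
# that `sl` is an already-constructed brainiak Searchlight object, as the
# class docstring describes; only the classifier construction is shown here.
def _example_voxel_selection(data, mask, labels, sl, num_folds=8):
    from sklearn import svm
    clf = svm.SVC(kernel='linear', C=1)
    vs = MVPAVoxelSelector(data, mask, labels, num_folds, sl)
    result_volume, results = vs.run(clf)
    # `results` holds (voxel_id, accuracy) tuples sorted best-first.
    return result_volume, results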
| apache-2.0 |
fabianp/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# the prediction intervals
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
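# Editor's note (not part of the original example): a hedged check of how many
# training observations fall inside the fitted [5%, 95%] band; with these
# quantiles roughly 90% of the noisy targets should be covered.
clf_hi = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                   n_estimators=250, max_depth=3,
                                   learning_rate=.1, min_samples_leaf=9,
                                   min_samples_split=9).fit(X, y)
clf_lo = GradientBoostingRegressor(loss='quantile', alpha=1.0 - alpha,
                                   n_estimators=250, max_depth=3,
                                   learning_rate=.1, min_samples_leaf=9,
                                   min_samples_split=9).fit(X, y)
inside = (y >= clf_lo.predict(X)) & (y <= clf_hi.predict(X))
print("Empirical coverage of the 90%% band: %.2f" % inside.mean())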
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/datasets/tests/test_base.py | 9 | 8508 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from functools import partial
import pytest
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals.six import b, u
from sklearn.externals._pilutil import pillow_installed
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
@pytest.fixture(scope="module")
def data_home(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture(scope="module")
def load_files_root(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture
def test_category_dir_1(load_files_root):
test_category_dir1 = tempfile.mkdtemp(dir=load_files_root)
sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
yield str(test_category_dir1)
_remove_dir(test_category_dir1)
@pytest.fixture
def test_category_dir_2(load_files_root):
test_category_dir2 = tempfile.mkdtemp(dir=load_files_root)
yield str(test_category_dir2)
_remove_dir(test_category_dir2)
def test_data_home(data_home):
# get_data_home will point to a pre-existing folder
    data_home_path = get_data_home(data_home=data_home)
    assert_equal(data_home_path, data_home)
    assert_true(os.path.exists(data_home_path))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=data_home)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files(load_files_root):
res = load_files(load_files_root)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_default_load_files(test_category_dir_1, test_category_dir_2,
load_files_root):
res = load_files(load_files_root)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
def test_load_files_w_categories_desc_and_encoding(
test_category_dir_1, test_category_dir_2, load_files_root):
category = os.path.abspath(test_category_dir_1).split('/').pop()
res = load_files(load_files_root, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
def test_load_files_wo_load_content(
test_category_dir_1, test_category_dir_2, load_files_root):
res = load_files(load_files_root, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
check_return_X_y(digits, partial(load_digits))
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
if pillow_installed:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
assert_true(res.DESCR)
# test return_X_y option
check_return_X_y(res, partial(load_diabetes))
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
assert_true(os.path.exists(res.data_filename))
assert_true(os.path.exists(res.target_filename))
# test return_X_y option
check_return_X_y(res, partial(load_linnerud))
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_iris))
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
check_return_X_y(res, partial(load_wine))
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_breast_cancer))
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_boston))
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
jcasner/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
    def set_pickradius(self, pickradius): self._pickradius = pickradius
    def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection.  *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
            must be a sequence of RGBA tuples (arbitrary color strings,
            etc., are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
if True: ###not self._transforms:
self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
            linewidths = [p.get_linewidth() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| agpl-3.0 |
start-jsk/jsk_apc | jsk_2015_05_baxter_apc/scripts/stat_object_size.py | 2 | 1680 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as osp
import pickle
import cv2
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import jsk_apc2015_common
def get_object_sizes(data_dir):
cache_file = 'object_sizes.pkl'
if osp.exists(cache_file):
return pickle.load(open(cache_file, 'rb'))
img_shape = None
objects = jsk_apc2015_common.get_object_list()
df = []
for obj in objects:
mask_files = os.listdir(osp.join(data_dir, obj, 'masks'))
for f in mask_files:
if f.startswith('NP'):
continue
mask = cv2.imread(osp.join(data_dir, obj, 'masks', f), 0)
if img_shape is None:
img_shape = mask.shape
else:
assert img_shape == mask.shape
mask = (mask > 127).astype(int)
size = mask.sum()
df.append([objects.index(obj), obj, f, size])
df = pd.DataFrame(df)
df.columns = ['object_index', 'object_name', 'fname', 'size']
pickle.dump(df, open(cache_file, 'wb'))
return df
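# Note (added for clarity; not in the original script): the returned DataFrame
# has one row per mask image with columns object_index, object_name, fname and
# size, where size is the pixel count of the thresholded mask. Results are
# cached in object_sizes.pkl so repeated runs skip the image pass.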
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='APC2015berkeley dataset path')
args = parser.parse_args()
df = get_object_sizes(data_dir=args.data_dir)
median_size = df.groupby('object_name').median()['size']
df['size'] /= median_size.max()
order = df.groupby('object_name').median().sort('size')[::-1]['object_index'].astype(np.int64)
sns.boxplot(x='object_index', y='size', data=df, order=order)
plt.savefig('apc2015_object_sizes.png')
| bsd-3-clause |
bravelittlescientist/kdd-particle-physics-ml-fall13 | src/random_forest.py | 1 | 1905 | #!/usr/bin/python2
# This is a random forest ensemble classifier based on the scikit-learn
# ensemble module, taking as input an X and Y and any tree complexity
# parameters, and returning a fitted classifier that can then be analyzed
# with the metrics suite.
# See the example in the main method for usage and error-checking.
#
# Decision tree documentation:
# http://scikit-learn.org/stable/modules/tree.html
import sys
from util import get_split_training_dataset
from metrics import suite
import feature_selection_trees as fclassify
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
def train(Xtrain, Ytrain, n=350, grid=False):
""" Use entirety of provided X, Y to train random forest
Arguments
Xtrain -- Training data
Ytrain -- Training prediction
Returns
classifier -- A random forest of n estimators, fitted to Xtrain and Ytrain
"""
if grid == True:
forest = RandomForestClassifier(max_depth=None, random_state=0, min_samples_split=1,max_features=38)
parameters = {
'n_estimators': [200,250,300],
}
# Classify over grid of parameters
classifier = GridSearchCV(forest, parameters)
else:
classifier = RandomForestClassifier(n_estimators=n)
classifier.fit(Xtrain, Ytrain)
return classifier
if __name__ == "__main__":
# Let's take our training data and train a random forest
# on a subset.
Xt, Xv, Yt, Yv = get_split_training_dataset()
print "Random Forest Classifier"
Classifier = train(Xt, Yt)
suite(Yv, Classifier.predict(Xv))
# smaller feature set
Xtimp, features = fclassify.get_important_data_features(Xt, Yt)
Xvimp = fclassify.compress_data_to_important_features(Xv, features)
ClassifierImp = train(Xtimp,Yt)
print "Forest Classiifer, ~25 important features"
suite(Yv, ClassifierImp.predict(Xvimp))
| gpl-2.0 |
yanlend/scikit-learn | sklearn/tests/test_grid_search.py | 53 | 28730 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
ldamewood/renormalization | rain/abstaining.py | 1 | 7862 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is dumb
from __future__ import print_function
from sklearn.ensemble.base import BaseEnsemble
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.multiclass import OneVsRestClassifier
from scipy.stats import itemfreq
import numpy as np
import itertools
def hash_row(row):
one = np.uint64(1)
zero = np.uint64(0)
acc = zero
for i in row:
acc = np.left_shift(acc, one)
acc = np.bitwise_or(acc, one if i else zero)
return acc
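# Illustrative sketch (added; not in the original source): hash_row packs a
# boolean row into a bit pattern, most-significant bit first --
#
#     hash_row([True, False, True])   # 0b101 -> np.uint64(5)
#
# unhash_row(5, 3) below inverts each bit and returns [False, True, False],
# i.e. True marks the columns whose bit was *not* set in the mask.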
def unhash_row(h, cols = None):
row = []
h = np.uint64(h)
one = np.uint64(1)
zero = np.uint64(0)
if cols is None:
while h > 0:
row.append(np.bitwise_and(h, one)==zero)
h = np.right_shift(h, one)
else:
for col in range(cols):
row.append(np.bitwise_and(h, one)==zero)
h = np.right_shift(h, one)
return row[::-1]
def weights(y):
return {x : 1.*f/len(y) for x,f in itemfreq(y)}
def transform_X_for_hash(X, h = 0):
cols = np.array(unhash_row(h, X.shape[1]))
rows = ~np.isnan(X[:,cols]).any(axis=1)
return X[rows][:,cols], rows, cols
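# Note (added for clarity; not in the original source): transform_X_for_hash
# restricts X to the feature columns that are present for missingness pattern
# h, then drops any row that still contains a NaN in those columns, returning
# the reduced matrix plus the row and column masks.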
class AbstainingClassifier(BaseEnsemble):
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None,
verbose = False):
super(AbstainingClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self._verbose = verbose
self._poly2features = False
def fit(self, X, y):
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
# Find all feature hashes
# Feature: poly2 missing, etc hashes
hashes = np.unique(np.array([hash_row(i) for i in np.isnan(X)], dtype = np.uint64))
if self._poly2features:
hashes = np.unique([np.bitwise_or(i,j) for i,j in list(itertools.product(hashes, hashes))])
self._validate_estimator()
self.estimators_ = {}
self.estimator_weights_ = {}
self.estimator_errors_ = {}
for h in hashes:
self.estimators_[h] = self._make_estimator(append = False)
if self._verbose: print('Training on hash #%d' % h)
X_trans, rows, cols = transform_X_for_hash(X, h)
y_trans = y[rows]
if np.unique(y_trans).shape[0] == 1:
# Abstain from all data since there is only one class.
# Possible improvement - could this tell us that these features don't do anything?
self.estimator_weights_[h] = 0.
del self.estimators_[h]
continue
self.estimators_[h].fit(X_trans,y_trans)
incorrect = sum(self.estimators_[h].predict(X_trans) != y_trans)
abstaining = X.shape[0] - rows.sum()
            # misclassification rate, floored so the (commented-out) log
            # weight formula below cannot divide by zero
            wM = max(1. * incorrect / X.shape[0], 0.000001)
wA = 1. * abstaining / X.shape[0]
wC = 1 - wM - wA
print(wC, wM, wA)
self.estimator_weights_[h] = 1.#np.log(wC/wM) - np.log(len(self.estimators_[h].classes_) - 1)
weightsum = sum(self.estimator_weights_.values())
for h in self.estimator_weights_.iterkeys():
self.estimator_weights_[h] /= weightsum
def predict_proba(self, X):
#hashes = np.array([hash_row(i) for i in np.isnan(X)], dtype=long)
yres = np.zeros([X.shape[0], len(self.classes_)])
for h in self.estimators_.iterkeys():
weight = self.estimator_weights_[h]
estimator = self.estimators_[h]
X_trans, rows, cols = transform_X_for_hash(X, h)
if X_trans.shape[0] < 2 or X_trans.shape[1] < 2: continue
print(h,X_trans.shape)
y_predict = estimator.predict_proba(X_trans)
for cls in estimator.classes_:
                # index rows and columns together; chained fancy indexing
                # writes into a copy and would silently drop the update
                yres[np.ix_(rows, self.classes_ == cls)] += \
                    weight * y_predict[:, estimator.classes_ == cls]
return yres
def predict(self, X):
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def decision_function(self, X):
#check_is_fitted(self, "n_classes_")
#X = self._validate_X_predict(X)
pred = None
pred = np.zeros([X.shape[0], self.n_classes_])
for h in self.estimators_.iterkeys():
X_trans, rows, cols = transform_X_for_hash(X, h)
classes = self.estimators_[h].classes_[:, np.newaxis]
n_classes = len(self.estimators_[h].classes_)
pred[rows] += np.array([self.classes_ == self.estimators_[h].classes_[i] for i in (self.estimators_[h].predict(X_trans) == classes).T])
pred[rows] *= self.estimator_weights_[h]
#pred /= sum(self.estimator_weights_.values())
#if n_classes == 2:
# pred[:, 0] *= -1
# return pred.sum(axis=1)
return pred
if __name__ == '__main__':
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, linear_model, metrics, svm
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
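    # Note (added for clarity; not in the original): the two lines below
    # corrupt the training half of the data -- R is False for roughly 1% of
    # the entries, and (R - 1.) ** 0.5 is NaN exactly where R is False, so
    # about 1% of the training entries become missing values.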
R = (np.random.random(data[:n_samples / 2].shape) > 0.01)
data[:n_samples / 2] += np.power(R-1.,0.5)
# Create a classifier: a support vector classifier
baseclassifier = svm.SVC(gamma = 0.001)
classifier = AbstainingClassifier(baseclassifier)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show() | mit |
ctogle/dilapidator | src/dilap/BROKEN/graph/graph.py | 1 | 15113 | import dilap.core.base as db
import dilap.core.tools as dpr
import dilap.core.vector as dpv
import dilap.core.pointset as dps
#import dilap.core.graphnode as gnd
#import dilap.core.graphedge as geg
import dilap.mesh.tools as dtl
import matplotlib.pyplot as plt
import pdb
class geometry(db.base):
def radius(self):
pmags = []
for x in range(self.points.pcnt):
pmags.append(self.points.ps[x].magnitude())
return max(pmags)
def __init__(self,*args,**kwargs):
self.points = dps.pointset()
self.curves = []
self.surfaces = []
class node(db.base):
def connect(self,other):
if not other in self.ring:
self.ring.append(other)
def disconnect(self,other):
if other in self.ring:
self.ring.remove(other)
def __init__(self,index,px,**kwargs):
self.index = index
self.px = px
self.ring = []
class edge(db.base):
def connect(self,other):
if not other in self.ring:
self.ring.append(other)
def disconnect(self,other):
if other in self.ring:
self.ring.remove(other)
def __init__(self,index,one,two,**kwargs):
self.index = index
self.one = one
self.two = two
self.ring = []
class loop(db.base):
def __init__(self,index,*edges,**kwargs):
self.index = index
self.edges = edges
class face(db.base):
def __init__(self,index,*loops,**kwargs):
self.index = index
self.loops = loops
self.fnorm = None
class body(db.base):
def __init__(self,index,*faces,**kwargs):
self.index = index
self.faces = faces
class hole(db.base):
def __init__(self,index,body,**kwargs):
self.index = index
self.body = body
class mesh(db.base):
# returns all the d-cells incident on the
# (d-1)-cells and adjacent to the (d+1)-cells, after
# considering the restrictions imposed by non-None arguments
def mask(self,d = 0,v = None,e = None,l = None):
results = []
if d == 0:
if not v is None:
for ve in self.mask(1,v,None,None):
for vv in self.mask(0,None,ve,None):
if not vv is v:results.append(vv)
if not e is None:results.extend([e.one,e.two])
if not l is None:
for ve in self.mask(1,None,None,l):
for vv in self.mask(0,None,ve,None):
if not vv in results:results.append(vv)
elif d == 1:
if not v is None:results.extend(v.ring)
if not e is None:
for vv in self.mask(0,None,e,None):
for ve in self.mask(1,vv,None,None):
if not ve is e:results.append(ve)
if not l is None:results.extend(l.edges)
elif d == 2:
if not v is None:
for ve in self.mask(1,v,None,None):
for vl in self.mask(2,None,ve,None):
if not vl in results:results.append(vl)
if not e is None:results.extend(e.ring)
if not l is None:
for ve in self.mask(1,None,None,l):
for vl in self.mask(2,None,ve,None):
if not vl is l:results.append(vl)
return results
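    # Illustrative note (added; this module lives in a BROKEN subpackage):
    # mask is the generic adjacency query -- e.g. m.mask(0, None, e, None)
    # returns the two end nodes of edge e, and m.mask(2, None, e, None)
    # returns the loops in e.ring, for a mesh instance m.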
def __init__(self,index,owner,**kwargs):
self.index = index
self.owner = owner
self.nodes = []
self.edges = []
self.loops = []
self.faces = []
self.bdies = []
self.holes = []
self.nlook = {}
self.elook = {}
self.nodecount = 0
self.edgecount = 0
self.loopcount = 0
self.facecount = 0
self.bodycount = 0
self.holecount = 0
def topo(self):
return (self.nodecount,self.edgecount,self.loopcount,
self.facecount,self.bodycount,self.holecount)
def eulerstate(self):
v,e,l,f,s,g = self.topo()
state = v-e+f-(l-f)-2*(s-g)
return state
# make-body-face-loop-vertex
def mbflv(self,nx):
newnode = self.add_node(nx)
newloop = self.add_loop()
newface = self.add_face(newloop)
newbody = self.add_body(newface)
return newface,newloop,newnode
# make-edge-vertex
# create an edge starting at e with angle a
def mev(self,v,e = None,a = None):
dp = dpv.vector(0,0,0)
np = self.owner.geom.points.ps[v.index].copy().translate(dp)
        nx = self.owner.geom.points.new_point(np)
newnode = self.add_node(nx)
newedge = self.add_edge(v,newnode)
return newedge,newnode
# make-edge
#
def me(self,v1,v2,e1 = None,a1 = None,e2 = None,a2 = None):
# call either mekl,mefl,or mekbfl
#newedge = self.add_edge(v1,v2)
#return newedge.index
        raise NotImplementedError
    # make-edge-kill-loop
    def mekl(self):raise NotImplementedError
    # make-edge-face-loop
    def mefl(self):raise NotImplementedError
    # make-edge-kill-body-face-loop
    def mekbfl(self):raise NotImplementedError
    def glue(self):raise NotImplementedError
    def dkflev(self):raise NotImplementedError
    def kev(self):raise NotImplementedError
    def ke(self):raise NotImplementedError
    def unglue(self):raise NotImplementedError
    def mme(self):raise NotImplementedError
    def esplit(self):raise NotImplementedError
    def kve(self):raise NotImplementedError
    def lmove(self):raise NotImplementedError
def add_node(self,npx,**kwargs):
if npx in self.nlook:return self.nlook[npx]
newnode = node(self.nodecount,npx,**kwargs)
self.nodes.append(newnode)
self.nlook[newnode.px] = self.nodecount
self.nodecount += 1
return newnode
def add_edge(self,nd1,nd2,**kwargs):
if type(nd1) == type(1):nd1 = self.nodes[nd1]
if type(nd2) == type(1):nd2 = self.nodes[nd2]
ekey1 = (nd1.index,nd2.index)
ekey2 = (nd2.index,nd1.index)
if nd2 in nd1.ring:return self.elook[ekey1]
newedge = edge(self.edgecount,nd1,nd2,**kwargs)
nd1.connect(nd2)
nd2.connect(nd1)
self.edges.append(newedge)
self.elook[ekey1] = newedge.index
self.elook[ekey2] = newedge.index
self.edgecount += 1
return newedge
def add_loop(self,*edges,**kwargs):
newloop = loop(self.loopcount,edges,**kwargs)
for eg in edges:eg.connect(newloop)
self.loops.append(newloop)
self.loopcount += 1
return newloop
def add_face(self,*loops,**kwargs):
newface = face(self.facecount,loops,**kwargs)
#for lp in loops:lp.connect(newface)
self.faces.append(newface)
self.facecount += 1
return newface
def add_body(self,*faces,**kwargs):
newbody = body(self.bodycount,faces,**kwargs)
#for lp in loops:lp.connect(newface)
self.bdies.append(newbody)
self.bodycount += 1
return newbody
def delete_edge(self,ex):
eg = self.edges[ex]
nd1,nd2 = eg.one,eg.two
nd1.disconnect(nd2)
nd2.disconnect(nd1)
ekey1 = (nd1.index,nd2.index)
ekey2 = (nd2.index,nd1.index)
self.edges[eg.index] = None
del self.elook[ekey1]
del self.elook[ekey2]
def replace_edge(self,rx,*exs):
ering = self.edges[rx].ring
self.delete_edge(rx)
for ex in exs:
for er in ering:
self.edges[ex].connect(er)
class brep(db.base):
def plot(self,ax = None):
if ax is None:ax = dtl.plot_axes()
for n in self.mesh.nodes:
dtl.plot_point(self.geom.points.ps[n.px],ax)
for e in self.mesh.edges:
if e is None:continue
eps = self.geom.points.ps[e.one.px],self.geom.points.ps[e.two.px]
dtl.plot_edges(eps,ax)
r = self.geom.radius()
ax.set_xlim([-r,r])
ax.set_ylim([-r,r])
ax.set_zlim([-r,r])
return ax
def __init__(self,*args,**kwargs):
self.geom = geometry()
self.mesh = mesh(0,self)
def new_body(self,np):
nx = self.geom.points.new_point(np)
self.mesh.mbflv(nx)
    def add_node(self,p):
        x = self.geom.points.new_point(p)
        self.mesh.add_node(x)
    def add_edge(self,p1,p2):
        x1,x2 = self.geom.points.new_points(p1,p2)
        self.mesh.add_edge(x1,x2)
def add_face(self,*ps):
pxs = self.geom.points.new_points(*ps)
pxs = [self.mesh.add_node(pxs[x]) for x in range(len(pxs))]
exs = [self.mesh.add_edge(pxs[x-1],pxs[x]) for x in range(len(pxs))]
self.mesh.add_face(*exs)
def split_edge(self,ex):
e = self.mesh.edges[ex]
nds = self.mesh.mask(0,None,e,None)
mp = dpv.midpoint(*self.geom.points.get_points(nds[0].px,nds[1].px))
mx = self.mesh.add_node(self.geom.points.add_point(mp))
e1 = self.mesh.add_edge(nds[0].index,mx)
e2 = self.mesh.add_edge(nds[1].index,mx)
self.mesh.replace_edge(e.index,e1,e2)
def model(self):
pdb.set_trace()
def fun(self):
self.add_face(
dpv.vector( 0, 0,0),dpv.vector(10, 0,0),
dpv.vector(10,10,0),dpv.vector( 0,10,0))
self.add_face(
dpv.vector(10, 0,0),dpv.vector(20, 0,0),
dpv.vector(20,10,0),dpv.vector(10,10,0))
self.geom.points.ps[1].translate_z(10)
self.geom.points.ps[2].translate_z(10)
self.split_edge(2)
self.geom.points.ps[6].translate_z(2)
return
def btest():
br = brep()
nbp = dpv.vector(1,1,0)
br.new_body(nbp).mev(0)
#br.fun()
br.plot()
plt.show()
class twomanifold_graph(db.base):
def plot(self,ax = None):
if ax is None:ax = dtl.plot_axes()
for n in self.nodes:
if not n is None:n.plot(ax)
for e in self.edges:
if not e is None:e.plot(ax)
for f in self.faces:
if not f is None:f.plot(ax)
r = self.radius()
ax.set_xlim([-r,r])
ax.set_ylim([-r,r])
ax.set_zlim([-r,r])
return ax
def radius(self):
pmags = []
for nx in range(self.nodecount):
nd = self.nodes[nx]
if nd is None:continue
pmags.append(nd.p.magnitude())
return max(pmags)
def euler_poincare(self):
        # V - E + F = H + 2 * (S - G)
left = len(self.nodes)-len(self.edges)+len(self.faces)
right = 0 + 2 * (1 - 0)
return left == right
nodeclass = node
edgeclass = edge
faceclass = face
def __init__(self,**kwargs):
self._def('index',None,**kwargs)
self.nodes = []
self.nodes_lookup = {}
self.edges = []
self.edges_lookup = {}
self.faces = []
self.faces_lookup = {}
self.nodecount = 0
self.edgecount = 0
self.facecount = 0
# add a new node to the graph if no existing node exists
# ndkey is a tuple(x,y,z)
def add_node(self,ndkey,**kwargs):
if ndkey in self.nodes_lookup:
nd = self.nodes[self.nodes_lookup[ndkey]]
if not nd is None:return nd.index
nx,ny,nz = ndkey
newnode = self.nodeclass(self.nodecount,
dpv.vector(nx,ny,nz),**kwargs)
self.nodes.append(newnode)
self.nodes_lookup[newnode.key()] = newnode.index
self.nodecount += 1
return newnode.index
# delete an existing node from the graph
def del_node(self,ndkey):
if ndkey in self.nodes_lookup:
nd = self.nodes[self.nodes_lookup[ndkey]]
if nd is None:return
            # iterate over a copy of the keys: del_edge mutates edges_lookup,
            # and edge keys hold node keys, so compare against nd.key()
            for ekey in list(self.edges_lookup):
                if nd.key() in ekey:
                    self.del_edge(*ekey)
self.nodes[nd.index] = None
del self.nodes_lookup[nd.key()]
# add a new edge to the graph, or return existing index
def add_edge(self,ndkey1,ndkey2,**kwargs):
if ndkey1 in self.nodes_lookup:
nd1 = self.nodes[self.nodes_lookup[ndkey1]]
else:nd1 = self.nodes[self.add_node(ndkey1)]
if ndkey2 in self.nodes_lookup:
nd2 = self.nodes[self.nodes_lookup[ndkey2]]
else:nd2 = self.nodes[self.add_node(ndkey2)]
egkey = (ndkey1,ndkey2)
if egkey in self.edges_lookup:
eg = self.edges[self.edges_lookup[egkey]]
if not eg is None:return eg.index
egkey = (ndkey2,ndkey1)
if egkey in self.edges_lookup:
eg = self.edges[self.edges_lookup[egkey]]
if not eg is None:return eg.index
newedge = self.edgeclass(self.edgecount,nd1,nd2,**kwargs)
self.edges_lookup[(ndkey1,ndkey2)] = newedge.index
self.edges_lookup[(ndkey2,ndkey1)] = newedge.index
self.edges.append(newedge)
self.edgecount += 1
return newedge.index
# delete an existing edge from the graph
def del_edge(self,ndkey1,ndkey2):
ekey = (ndkey1,ndkey2)
if not ekey in self.edges_lookup:return
edge = self.edges[self.edges_lookup[ekey]]
del self.edges_lookup[ekey]
del self.edges_lookup[ekey[::-1]]
self.edges[edge.index] = None
# add a new face to the graph if no existing face exists
# fkey is a tuple(nkey1,...,nkeyN)
def add_face(self,fkey,**kwargs):
pdb.set_trace()
# delete an existing face from the graph
def del_face(self,fkey):
pdb.set_trace()
def test():
g = graph()
g.add_edge(( 0, 0,0),(10, 10,0))
g.add_edge(( 0, 0,0),(10,-10,0))
g.add_edge((10,10,0),( 0,-10,0))
g.plot()
plt.show()
'''#
def get_nodes(self,nkeys):
nds = []
for nk in nkeys:
nd = self.nodes[self.nodes_lookup[nk]]
nds.append(nd)
return nds
def get_node_points(self,nkeys):
nps = []
for nk in nkeys:
np = self.nodes[self.nodes_lookup[nk]].p
nps.append(np.copy())
return nps
# add a new edges to the graph, return indicies
def _add_edges(self,ndkeys,**kwargs):
edgexs = []
for kdx in range(1,len(ndkeys)):
ndkey1,ndkey2 = ndkeys[kdx-1],ndkeys[kdx]
edgexs.append(self._add_edge(ndkey1,ndkey2,**kwargs))
return edgexs
# remove existing edge from ndx1 to ndx2
# add two new edges, connecting n to ndx1 and to ndx2
def _split_edge(self,ndx1,ndx2,newndx,**kwargs):
sekey = self.edges_lookup[(ndx1,ndx2)]
if not sekey is None:
sedge = self.edges[sekey]
kwargs['interpolated'] = sedge.interpolated
self._del_edge(ndx1,ndx2)
nd1,nd2 = self.nodes[ndx1],self.nodes[newndx]
if nd1.p.near(nd2.p):pdb.set_trace()
newedge = edge(nd1,nd2,**kwargs)
self._add_edge(newedge)
nd1,nd2 = self.nodes[ndx2],self.nodes[newndx]
if nd1.p.near(nd2.p):pdb.set_trace()
newedge = edge(nd1,nd2,**kwargs)
self._add_edge(newedge)
'''#
| mit |
waterponey/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 77 | 3825 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'fast_mcd expects at least 2 samples',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'MinCovDet expects at least 2 samples',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
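# Hedged usage sketch, for illustration only (not part of the upstream test
# suite); the sample size, dimensionality and contamination level below are
# arbitrary assumptions chosen to mirror the calls exercised above.
def _demo_min_cov_det_usage():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(200, 3)
    X_demo[:20] += 5.0  # shift a few rows so they act as outliers
    # the robust fit should largely ignore the shifted rows
    mcd = MinCovDet(random_state=rng).fit(X_demo)
    return mcd.location_, mcd.covariance_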
| bsd-3-clause |
shenzebang/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
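# Note on the formula (illustrative values are assumed): each one-vs-all
# boundary is the set of points with coef[c, 0]*x0 + coef[c, 1]*x1 +
# intercept[c] = 0, and line() solves that equation for x1 given x0. For
# example, coef[c] = (2, 1) and intercept[c] = -1 give x1 = 1 at x0 = 0.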
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
dhhagan/py-openaq | docs/conf.py | 1 | 5532 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# py-openaq documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 12 20:42:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_bootstrap_theme
import matplotlib as mpl
mpl.use("Agg")
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'matplotlib.sphinxext.plot_directive',
'plot_generator',
'numpydoc',
'ipython_directive',
'ipython_console_highlighting'
]
autosummary_generate = True
numpydoc_show_class_members = False
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'py-openaq'
import time
copyright = '{}, David H Hagan'.format(time.strftime("%Y"))
author = 'David H Hagan'
# The short X.Y version.
sys.path.insert(0, os.path.abspath(os.path.pardir))
import openaq
version = openaq.__version__
# The full version, including alpha/beta/rc tags.
release = openaq.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'source_link_position': 'footer',
'bootswatch_theme': 'paper',
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [
("API", "api"),
("Tutorial", "tutorial"),
("Gallery", "examples/index")
]
}
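# Note (illustrative): each navbar_links entry is a (label, target) pair whose
# target is a document path relative to the documentation root, so
# ("Gallery", "examples/index") resolves to examples/index.html in the built
# site.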
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', 'example_thumbs']
# Output file base name for HTML help builder.
htmlhelp_basename = 'py-openaqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py-openaq.tex', 'py-openaq Documentation',
'David H Hagan', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py-openaq', 'py-openaq Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py-openaq', 'py-openaq Documentation',
author, 'py-openaq', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
app.add_javascript("copybutton.js")
app.add_stylesheet("style.css")
| mit |
poldrack/fmri-analysis-vm | analysis/utils/graph_utils.py | 1 | 2407 | def show_graph_from_adjmtx(A,B,C,title=''):
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
gr = nx.DiGraph()
nodes=list(range(1,A.shape[0]))
gr.add_nodes_from(nodes)
gr.add_node('u')
rows, cols = np.where(A == 1)
edges_A = list(zip(cols.tolist(), rows.tolist()))
gr.add_edges_from(edges_A)
rows, cols = np.where(B == 1)
edges_B = list(zip(cols.tolist(), rows.tolist()))
gr.add_edges_from(edges_B)
    mylabels = {i: '%d' % i for i in gr.nodes() if i != 'u'}
rows=np.where(C==1)[0]
edges_C=[]
for r in rows:
edges_C.append(('u',r))
gr.add_edges_from(edges_C)
mylabels['u']='u'
pos=nx.circular_layout(gr)
print(gr.edges())
nx.draw_networkx_nodes(gr, node_size=500, pos=pos,labels=mylabels, with_labels=True)
nx.draw_networkx_labels(gr,pos=pos,labels=mylabels)
nx.draw_networkx_edges(gr,pos,edgelist=edges_C,edge_color='k')
print('Black: input')
nx.draw_networkx_edges(gr,pos,edgelist=edges_A,edge_color='r')
print('Red: unmodulated')
if edges_B:
nx.draw_networkx_edges(gr,pos,edgelist=edges_B,edge_color='b')
print('Blue: modulated')
plt.title(title)
plt.show()
return gr
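# Hedged usage sketch for show_graph_from_adjmtx (shapes and values are
# assumptions, not taken from the module): A and B are square 0/1 arrays of
# node-to-node connections (unmodulated and modulated) and C marks which nodes
# receive the input 'u', e.g.
# A = np.array([[0, 1], [0, 0]]); B = np.zeros((2, 2)); C = np.array([1, 0])
# gr = show_graph_from_adjmtx(A, B, C, title='toy network')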
def show_graph_from_pattern(pattern_file,nnodes=5):
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
pf=[i.strip().replace('"','') for i in open(pattern_file).readlines()]
if not pf[0].find('digraph')>-1:
raise RuntimeError('input is not a valid dot file')
gr = nx.DiGraph()
nodes=[]
edges=[]
for l in pf[1:]:
l_s=l.split(' ')
print(l_s)
if len(l_s)>1:
# if it's a numeric node, add to the list
try:
nodes.append(int(l_s[0]))
n1=int(l_s[0])
except:
n1=l_s[0]
try:
nodes.append(int(l_s[2]))
n2=int(l_s[2])
except:
n2=l_s[2]
edges.append((n1,n2))
assert l_s[4].find('arrowhead')>-1
if l_s[4].find('none')>-1:
edges.append((n2,n1))
    nodes=list(range(0,nnodes)) # include any nodes that had no connections
    mylabels = {i: '%d' % i for i in nodes}
mylabels['u']='u'
nodes.append('u')
gr.add_nodes_from(nodes)
gr.add_edges_from(edges)
pos=nx.circular_layout(gr)
nx.draw_networkx_nodes(gr, node_size=500, pos=pos,labels=mylabels, with_labels=True)
nx.draw_networkx_labels(gr,pos=pos,labels=mylabels)
nx.draw_networkx_edges(gr,pos,edge_color='r')
print('Red: unmodulated')
plt.show()
return gr
| mit |
fraserphysics/F_UNCLE | examples/make_notes.py | 1 | 12712 | """Script for re-generating the notes figures
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard python packages
import sys
import os
import pdb
import argparse
import time
from collections import OrderedDict
# External python packages
import numpy as np
import matplotlib.pyplot as plt
# F_UNLCE packages
sys.path.append(os.path.abspath('./../'))
from F_UNCLE.Experiments.GunModel import Gun, GunExperiment
from F_UNCLE.Experiments.Stick import Stick, StickExperiment
from F_UNCLE.Experiments.Sphere import Sphere, SphereExperiment
from F_UNCLE.Models.Isentrope import EOSModel, EOSBump
from F_UNCLE.Opt.Bayesian import Bayesian
from F_UNCLE.Models.Ptw import Ptw
parser = argparse.ArgumentParser(description='Generate plots for notes.tex')
parser.add_argument('--show', dest='show', action='store_true')
parser.add_argument('--fig_dir', type=str, dest='fig_dir', default='./NotesFig/',
help='Directory of figures')
for s, h in (
('eos_diff', 'Plot the difference between the prior and true eos'),
('eos', 'Plot prior and true eos'),
('eos_basis', 'Plots the eos basis functions'),
('info_all',
'Plot the eigenvalues and eigenfunctions for all experiments'),
('info_gun',
'Plot the eigenvalues and eigenfunctions for the gun experiment'),
('info_stick',
'Plot the eigenvalues and eigenfunctions for the stick experiment'),
('info_sphere',
'Plot the eigenvalues and eigenfunctions for the sphere experiment'),
('gun_sens', 'Plot the sensitivity of the gun results to the model'),
('stick_sens',
'Plot the sensitivity of the stick results to the model'),
('sphere_sens',
'Plot the sensitivity of the sphere results to the model'),
('conv', 'Plot the difference between the prior and true eos'),
('stick_results', 'Results from the stick experiment'),
('gun_results', 'Results from the gun experiment'),
('sphere_results', 'Results from the sphere experiment'),
('rayl_line', 'Rayleigh line results'),
):
parser.add_argument('--' + s, dest=s, action='store_const', const=True,
default=False, help=h)
options = parser.parse_args()
if not os.path.isdir(options.fig_dir):
os.mkdir(options.fig_dir)
if __name__ == '__main__':
#################
# Get Data #
#################
# 1. Generate a functional form for the prior
init_prior = np.vectorize(lambda v: 2.56e9 / v**3)
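    # Note: the assumed prior here is the isentrope p(v) = 2.56e9 / v**3, i.e.
    # pressure falling with the cube of specific volume; the *true* EOS created
    # next perturbs a form like this with a bump.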
# 2. Create the model and *true* EOS
eos_model = EOSModel(init_prior, Spline_sigma=0.005, spacing='lin')
eos_true = EOSBump()
# 3. Create the objects to generate simulations and pseudo experimental data
gun_simulation = Gun(mass_he=1.0, sigma=1.0)
gun_experiment = GunExperiment(model=eos_true, mass_he=1.0)
stick_simulation = Stick(model_attribute=eos_true)
stick_experiment = StickExperiment(model=eos_true,
sigma_t=1E-9,
sigma_x=2E-3)
sphere_experiment = SphereExperiment(model=eos_true, ptw=Ptw())
sphere_simulation = Sphere()
models = OrderedDict()
models['eos'] = eos_model
models['strength'] = Ptw()
# 4. Create the analysis object
analysis = Bayesian(
simulations={
'Gun': [gun_simulation, gun_experiment],
'Stick': [stick_simulation, stick_experiment],
'Sphere': [sphere_simulation, sphere_experiment]
},
models= models,
opt_keys=['eos'],
constrain=True,
outer_reltol=1E-6,
precondition=True,
debug=False,
verb=True,
sens_mode='ser',
maxiter=1)
# 5. Generate data from the simulations using the prior
gun_prior_sim = gun_simulation(analysis.models)
stick_prior_sim = stick_simulation(analysis.models)
# sphere_prior_sim = sphere_simulation(analysis.models)
# 6. Run the analysis
to = time.time()
opt_model, history, sens_matrix, fisher_matrix = analysis()
    print('time taken ', time.time() - to)
# 7. Update the simulations and get new data
g_time_s, (g_vel_s, g_pos_s, labels), g_spline_s =\
opt_model.simulations['Gun']['sim'](opt_model.models)
g_time_e, (g_vel_e, g_pos_e, tmp, labels), g_spline_e =\
opt_model.simulations['Gun']['exp']()
s_pos_s, (s_time_s, labels), s_data_s =\
opt_model.simulations['Stick']['sim'](opt_model.models)
    s_pos_e, (s_time_e, tmp, tmp, labels), s_data_e = opt_model.simulations['Stick']['exp']()
sp_res_s = opt_model.simulations['Sphere']['sim'](opt_model.models)
sp_res_e = opt_model.simulations['Sphere']['exp']()
####################
# Generate Figures #
####################
from matplotlib import rcParams
rcParams['axes.labelsize'] = 8
rcParams['xtick.labelsize'] = 8
rcParams['ytick.labelsize'] = 8
rcParams['legend.fontsize'] = 7
rcParams['legend.handlelength'] = 3.0
rcParams['backend'] = 'Agg'
pagewidth = 360 # pts
au_ratio = (np.sqrt(5) - 1.0) / 2.0
figwidth = 1.0 # fraction of \pagewidth for figure
figwidth *= pagewidth / 72.27
figtype = '.pdf'
out_dir = options.fig_dir
square = (figwidth, figwidth)
tall = (figwidth, 1.25 * figwidth)
vrange=(0.2,0.9)
def eos_diff():
''' Compare EOS functions
'''
fig = plt.figure(figsize=square)
opt_model.models['eos'].plot_diff(
axes=fig.gca(),
isentropes=[eos_true],
labels=['True'],
vrange=vrange)
return fig
def rayl_line():
''' Rayleigh line
'''
fig = plt.figure(figsize=square)
ax = fig.gca()
opt_model.simulations['Stick']['sim'].\
plot(opt_model.models,
axes=ax,
vrange=vrange)
eos_model.prior.plot(axes=ax,
linestyles=['--b'],
labels=['Prior EOS'],
vrange=vrange)
eos_true.plot(axes=ax,
linestyles=['-.g'],
labels=['True EOS'],
vrange=vrange)
ax.legend(loc='best')
fig.tight_layout()
return fig
def eos():
''' Nominal and true EOS:
'''
fig = plt.figure(figsize=square)
ax = fig.gca()
eos_model.prior.plot(axes=ax,
linestyles=['--b'],
labels=['Prior EOS'],
vrange=vrange)
eos_true.plot(axes=ax,
linestyles=['-.g'],
labels=['True EOS'],
vrange=vrange)
ax.legend(loc='best')
fig.tight_layout()
return fig
def info_all():
spec_data = Bayesian.fisher_decomposition(
fisher_matrix, 'All', opt_model.models, 'isen')
fig = plt.figure(figsize=tall)
fig = opt_model.plot_fisher_data(spec_data, fig=fig)
fig.set_size_inches(tall)
fig.tight_layout()
return fig
def info_gun():
''' Fisher information about the gun experiment
'''
spec_data = Bayesian.fisher_decomposition(
fisher_matrix, 'Gun', opt_model.models, 'eos')
fig = plt.figure(figsize=tall)
fig = opt_model.plot_fisher_data(spec_data, fig=fig)
fig.set_size_inches(tall)
fig.tight_layout()
return fig
def info_stick():
''' Fisher information about the stick
'''
spec_data = Bayesian.fisher_decomposition(
fisher_matrix, 'Stick', opt_model.models, 'eos')
fig = plt.figure(figsize=tall)
fig = opt_model.plot_fisher_data(spec_data, fig=fig)
fig.tight_layout()
return fig
def info_sphere():
''' Fisher information about the sphere
'''
spec_data = Bayesian.fisher_decomposition(
fisher_matrix, 'Sphere', opt_model.models, 'eos')
fig = plt.figure(figsize=tall)
fig = opt_model.plot_fisher_data(spec_data, fig=fig)
fig.tight_layout()
return fig
def stick_results():
''' Results of the optimized stick simulation
'''
fig = plt.figure(figsize=square)
ax = fig.gca()
stick_simulation.plot(opt_model.models,
axes=ax, linestyles=['-k'],
labels=['Fit EOS'], level=2,
data=(s_pos_s, (s_time_s,), s_data_s))
stick_simulation.plot(opt_model.models,
axes=ax, linestyles=['+g'],
labels=['True EOS'], level=2,
data=(s_pos_e, (s_time_e,), s_data_e))
stick_simulation.plot(opt_model.models,
axes=ax, linestyles=['--b'],
labels=['Prior EOS'], level=2,
data=stick_prior_sim)
ax.legend(loc='best')
fig.tight_layout()
return fig
def gun_results():
''' Results of the optimized gun simulation
'''
fig = plt.figure(figsize=tall)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
opt_model.models['eos'].plot(axes=ax1,
linestyles=['-k'],
labels=['Fit EOS'],
vrange=vrange)
eos_model.prior.plot(axes=ax1,
linestyles=['--b'],
labels=['Prior EOS'],
vrange=vrange)
eos_true.plot(axes=ax1,
linestyles=['-.g'],
labels=['True EOS'],
vrange=vrange)
ax1.legend(loc='best')
gun_simulation.plot(axes=ax2, linestyles=['-k', '-r'],
labels=['Fit EOS', 'Error'],
data=[(g_time_s, (g_vel_s, g_pos_s), g_spline_s),
(g_time_e, (g_vel_e, g_pos_e), g_spline_e)])
gun_simulation.plot(axes=ax2, linestyles=['-.g'], labels=['True EOS'],
data=[(g_time_e, (g_vel_e, g_pos_e), g_spline_e)])
gun_simulation.plot(axes=ax2, linestyles=['--b'], labels=['Prior EOS'],
data=[gun_prior_sim])
ax2.legend(loc='upper left', framealpha=0.5)
fig.tight_layout()
return fig
def sphere_results():
''' Results of the optimized Sphere simulation
'''
fig = plt.figure(figsize=tall)
fig = sphere_simulation.plot(sp_res_s, fig=fig)
fig.tight_layout()
return fig
def conv():
'''Convergence history
'''
fig = plt.figure(figsize=square)
opt_model.plot_convergence(history, axes=fig.gca())
fig.tight_layout()
return fig
gun_sens = lambda: EOSModel.plot_sens_matrix(
sens_matrix,
'Gun',
opt_model.models,
'eos',
fig=plt.figure(figsize=square),
) # Gun sensitivity plot
stick_sens = lambda: EOSModel.plot_sens_matrix(
sens_matrix,
'Stick',
opt_model.models,
'eos',
fig=plt.figure(figsize=square),
) # Stick sensitivity plot
sphere_sens = lambda: EOSModel.plot_sens_matrix(
sens_matrix,
'Sphere',
opt_model.models,
'eos',
fig=plt.figure(figsize=square),
) # Sphere sensitivity plot
eos_basis = lambda: eos_model.plot_basis(
fig=plt.figure(figsize=square)) # EOS basis functions
L = locals()
for name in '''eos_diff rayl_line eos info_gun info_stick info_sphere stick_results gun_results sphere_results conv gun_sens stick_sens sphere_sens eos_basis '''.split():
# for name in """gun_results gun_sens eos_diff info_gun stick_results stick_sens info_stick""".split():
#for name in """gun_results gun_sens eos_diff info_gun stick_results stick_sens info_stick eos""".split():
# for name in '''eos_diff rayl_line eos info_stick conv stick_sens
# stick_results eos_basis '''.split():
# if name in options:
L[name]().savefig(out_dir + name + figtype, dpi=1000)
if options.show:
plt.show()
| gpl-2.0 |
molmod/molmod | molmod/test/test_unit_cells.py | 1 | 20815 | # -*- coding: utf-8 -*-
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2019 Toon Verstraelen <[email protected]>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
import numpy as np
import pytest
from molmod.test.common import BaseTestCase
from molmod import *
__all__ = ["UnitCellTestCase"]
def get_random_uc(scale=2, num_active=3, min_spacing=0.1):
while True:
active = np.random.randint(0, 2, 3).astype(bool)
if active.sum() == num_active:
break
while True:
try:
result = UnitCell(np.random.uniform(-scale, scale, (3, 3)), active)
if not active.any() or result.spacings[active].min() >= min_spacing:
return result
except ValueError:
pass
class UnitCellTestCase(BaseTestCase):
def test_parameters(self):
for counter in range(100):
in_lengths = np.random.uniform(0.5, 1, (3,))
in_angles = np.random.uniform(0.3, np.pi/2, (3,))
try:
uc = UnitCell.from_parameters3(in_lengths, in_angles)
except ValueError as e:
continue
out_lengths, out_angles = uc.parameters
self.assertArraysAlmostEqual(in_lengths, out_lengths)
self.assertArraysAlmostEqual(in_angles, out_angles)
def test_reciprocal(self):
for counter in range(100):
uc = get_random_uc(num_active=2)
if uc.active[0]:
self.assertAlmostEqual(np.dot(uc.matrix[:,0], uc.reciprocal[:,0]), 1.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,0], uc.reciprocal[:,1]), 0.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,0], uc.reciprocal[:,2]), 0.0)
if uc.active[1]:
self.assertAlmostEqual(np.dot(uc.matrix[:,1], uc.reciprocal[:,0]), 0.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,1], uc.reciprocal[:,1]), 1.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,1], uc.reciprocal[:,2]), 0.0)
if uc.active[2]:
self.assertAlmostEqual(np.dot(uc.matrix[:,2], uc.reciprocal[:,0]), 0.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,2], uc.reciprocal[:,1]), 0.0)
self.assertAlmostEqual(np.dot(uc.matrix[:,2], uc.reciprocal[:,2]), 1.0)
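        # Worked illustration (assumed cell, not part of the test): for an
        # orthogonal cell with matrix = diag(2, 4, 5) the reciprocal is
        # diag(1/2, 1/4, 1/5), so dot(matrix[:, i], reciprocal[:, j]) gives
        # exactly the identity entries checked above.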
def test_reciprocal_bis(self):
for i in range(100):
uc = get_random_uc(num_active=2)
active, inactive = uc.active_inactive
for i in inactive:
self.assertEqual(abs(uc.reciprocal[:,i]).max(), 0.0)
if len(active) == 1:
unit = uc.reciprocal[:,active[0]].copy()
r = np.linalg.norm(unit)
unit /= r
cell_vector = unit/r
self.assertArraysAlmostEqual(cell_vector, uc.matrix[:,active[0]])
elif len(active) == 2:
# construct an auxiliary normal vector
normal = np.cross(uc.reciprocal[:,active[0]], uc.reciprocal[:,active[1]])
norm = np.linalg.norm(normal)
# reconstruct the cell vectors
cell_vector0 = np.cross(uc.reciprocal[:,active[1]], normal)/norm**2
cell_vector1 = -np.cross(uc.reciprocal[:,active[0]], normal)/norm**2
self.assertArraysAlmostEqual(cell_vector0, uc.matrix[:,active[0]])
self.assertArraysAlmostEqual(cell_vector1, uc.matrix[:,active[1]])
elif len(active) == 3:
self.assertArraysEqual(uc.reciprocal, uc.reciprocal)
def test_to_fractional(self):
for i in range(100):
uc = get_random_uc()
fractional = np.random.uniform(-0.5, 0.5, 3)
cartesian = fractional[0]*uc.matrix[:,0] + fractional[1]*uc.matrix[:,1] + fractional[2]*uc.matrix[:,2]
fractional_bis = uc.to_fractional(cartesian)
self.assertArraysAlmostEqual(fractional, fractional_bis)
for i in range(100):
uc = get_random_uc()
cartesian = np.random.uniform(-3, 3, (10,3))
fractional = uc.to_fractional(cartesian)
for i in range(10):
fractional_bis = uc.to_fractional(cartesian[i])
self.assertArraysAlmostEqual(fractional[i], fractional_bis)
def test_to_cartesian(self):
for i in range(100):
uc = get_random_uc()
cartesian = np.random.uniform(-3, 3, 3)
fractional = cartesian[0]*uc.reciprocal[0] + cartesian[1]*uc.reciprocal[1] + cartesian[2]*uc.reciprocal[2]
cartesian_bis = uc.to_cartesian(fractional)
self.assertArraysAlmostEqual(cartesian, cartesian_bis)
for i in range(100):
uc = get_random_uc()
fractional = np.random.uniform(-0.5, 0.5, (10,3))
cartesian = uc.to_cartesian(fractional)
for i in range(10):
cartesian_bis = uc.to_cartesian(fractional[i])
self.assertArraysAlmostEqual(cartesian[i], cartesian_bis)
def test_consistency(self):
for i in range(100):
uc = get_random_uc()
cartesian = np.random.uniform(-3, 3, 3)
fractional = uc.to_fractional(cartesian)
cartesian_bis = uc.to_cartesian(fractional)
self.assertArraysAlmostEqual(cartesian, cartesian_bis)
for i in range(100):
uc = get_random_uc()
fractional = np.random.uniform(-0.5, 0.5, (10,3))
cartesian = uc.to_cartesian(fractional)
fractional_bis = uc.to_fractional(cartesian)
self.assertArraysAlmostEqual(fractional, fractional_bis)
def test_add_periodicities(self):
for counter in range(100):
uc0 = UnitCell(np.identity(3, float), np.zeros(3,bool))
uc1 = uc0.add_cell_vector(np.random.uniform(-2,2,3))
uc2 = uc1.add_cell_vector(np.random.uniform(-2,2,3))
uc3 = uc2.add_cell_vector(np.random.uniform(-2,2,3))
@pytest.mark.xfail
def test_shortest_vector(self):
# simple cases
uc = UnitCell(np.identity(3,float)*3)
self.assertArraysAlmostEqual(uc.shortest_vector([3, 0, 1]), np.array([0, 0, 1]))
self.assertArraysAlmostEqual(uc.shortest_vector([-3, 0, 1]), np.array([0, 0, 1]))
self.assertArraysAlmostEqual(uc.shortest_vector([-2, 0, 1]), np.array([1, 0, 1]))
self.assertArraysAlmostEqual(uc.shortest_vector([-1.6, 1, 1]), np.array([1.4, 1, 1]))
self.assertArraysAlmostEqual(uc.shortest_vector([-1.4, 1, 1]), np.array([-1.4, 1, 1]))
# simple cases
uc = UnitCell(np.identity(3,float)*3, np.array([True, False, False]))
self.assertArraysAlmostEqual(uc.shortest_vector([3, 0, 1]), np.array([0, 0, 1]))
self.assertArraysAlmostEqual(uc.shortest_vector([3, 0, 3]), np.array([0, 0, 3]))
# random tests
for uc_counter in range(1000):
uc = get_random_uc(num_active=2)
for r_counter in range(10):
r0 = np.random.normal(0, 10, 3)
r1 = uc.shortest_vector(r0)
change = r1 - r0
assert np.dot(change, r0) <= 0
#assert np.linalg.norm(r0) >= np.linalg.norm(r1)
index = uc.to_fractional(r0-r1)
self.assertArraysAlmostEqual(index, np.round(index), doabs=True)
index = uc.to_fractional(r1)
assert index.max()<0.5
assert index.max()>=-0.5
r0 = np.random.normal(0, 10, (10,3))
r1 = uc.shortest_vector(r0)
for i in range(10):
r1_row_bis = uc.shortest_vector(r0[i])
self.assertArraysAlmostEqual(r1_row_bis, r1[i], doabs=True)
def test_shortest_vector_trivial(self):
uc = UnitCell(np.identity(3, float))
half = np.array([0.5,0.5,0.5])
self.assertArraysEqual(uc.shortest_vector(half), -half)
self.assertArraysEqual(uc.shortest_vector(-half), -half)
def test_spacings(self):
uc = UnitCell(np.identity(3,float)*3)
self.assertArraysAlmostEqual(uc.spacings, np.ones(3, float)*3.0)
for i in range(100):
uc = get_random_uc()
a = uc.matrix[:,0]
b = uc.matrix[:,1]
c = uc.matrix[:,2]
ap = np.cross(b,c)
ap /= np.linalg.norm(ap)
spacing = abs(np.dot(a, ap))
self.assertAlmostEqual(uc.spacings[0], spacing)
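        # Note: this reproduces the plane-spacing formula d_a = |a . n|, with
        # n = (b x c)/|b x c|, i.e. the distance between successive lattice
        # planes spanned by b and c; for the diagonal 3*identity cell above
        # all three spacings are simply 3.0.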
def test_radius_ranges(self):
for i in range(20):
uc = get_random_uc()
radius = np.random.uniform(1,5)
ranges = uc.get_radius_ranges(radius)
for j in range(100):
c0 = uc.to_cartesian(np.random.uniform(-0.5, 0.5, 3))
c1 = c0 + radius*random_unit()
f1 = uc.to_fractional(c1)
assert (abs(f1) <= ranges+0.5).all(), "f1=%s ranges=%s" % (f1, ranges)
def test_radius_ranges_2d(self):
uc = UnitCell(np.identity(3, float), np.array([True, True, False]))
self.assertArraysEqual(uc.get_radius_ranges(3), np.array([3,3,0]))
def test_radius_indexes(self):
for i in range(20):
uc = get_random_uc()
radius = np.random.uniform(1,2)*abs(uc.volume)**(0.333)
#uc = UnitCell.from_parameters3(
# np.array([3.73800243, 2.35503196, 3.25130153]),
# np.array([29.78777448, 64.81228452, 86.7641093])*deg,
#)
#lengths, angles = uc.parameters
#radius = 1.98042040465
#matrix = np.array([[2,0,0],[0,2,0],[0,0,2]], float)
#uc = UnitCell(matrix)
#radius = 5.3
#lengths, angles = uc.parameters
#print lengths
#print angles/deg
#print radius
ranges = uc.get_radius_ranges(radius)
#print "ranges", ranges
if np.product(ranges) > 100:
continue
indexes = uc.get_radius_indexes(radius)
assert len(indexes)*abs(uc.volume) > 4.0/3.0*np.pi*radius**3
#print "testing distances"
for j in range(20):
c0 = uc.to_cartesian(np.random.uniform(-0.5, 0.5, 3))
c1 = uc.to_cartesian(np.random.uniform(-0.5, 0.5, 3))
relative = c1 - c0
# compute all distances between c0 and c1 based on radius
# ranges
distances_slow = []
for i0 in range(-ranges[0], ranges[0]+1):
for i1 in range(-ranges[1], ranges[1]+1):
for i2 in range(-ranges[2], ranges[2]+1):
delta = uc.to_cartesian([i0,i1,i2])
distance = np.linalg.norm(relative + delta)
if distance <= radius:
distances_slow.append(distance)
distances_slow.sort()
distances_fast = []
for index in indexes:
delta = uc.to_cartesian(index)
distance = np.linalg.norm(relative + delta)
if distance <= radius:
distances_fast.append(distance)
distances_fast.sort()
#print distances_slow
#print distances_fast
#print
self.assertArraysAlmostEqual(np.array(distances_slow), np.array(distances_fast))
def test_radius_indexes_1d(self):
uc = UnitCell(np.identity(3, float), np.array([True, False, False]))
indexes = uc.get_radius_indexes(0.5)
expected_indexes = np.array([
[-1, 0, 0],
[ 0, 0, 0],
[ 1, 0, 0],
])
self.assertArraysEqual(indexes, expected_indexes)
uc = UnitCell(np.identity(3, float), np.array([True, False, False]))
indexes = uc.get_radius_indexes(1.8, np.array([4,-1,-1]))
expected_indexes = np.array([
[-2, 0, 0],
[-1, 0, 0],
[ 0, 0, 0],
[ 1, 0, 0],
])
self.assertArraysEqual(indexes, expected_indexes)
def test_radius_indexes_2d(self):
uc = UnitCell(np.identity(3, float), np.array([True, True, False]))
indexes = uc.get_radius_indexes(0.5)
expected_indexes = np.array([
[-1, -1, 0],
[-1, 0, 0],
[-1, 1, 0],
[ 0, -1, 0],
[ 0, 0, 0],
[ 0, 1, 0],
[ 1, -1, 0],
[ 1, 0, 0],
[ 1, 1, 0],
])
self.assertArraysEqual(indexes, expected_indexes)
@pytest.mark.xfail
def test_radius_indexes_2d_graphical(self):
#uc = UnitCell(np.array([
# [2.0, 1.0, 0.0],
# [0.0, 0.2, 0.0],
# [0.0, 0.0, 10.0],
#]))
#radius = 0.8
uc = UnitCell(np.array([
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 10.0],
]))
radius = 5.3
#uc = UnitCell(np.array([
# [1.0, 1.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0],
#]))
#radius = 0.9
fracs = np.arange(-0.5, 0.55, 0.1)
import pylab
from matplotlib.patches import Circle, Polygon
from matplotlib.lines import Line2D
pylab.clf()
for i0 in fracs:
for i1 in fracs:
center = uc.to_cartesian([i0,i1,0.0])
pylab.gca().add_artist(Circle((center[0], center[1]), radius, fill=True, fc='#7777AA', ec='none'))
pylab.gca().add_artist(Circle((0, 0), radius, fill=True, fc='#0000AA', ec='none'))
ranges = uc.get_radius_ranges(radius)
indexes = uc.get_radius_indexes(radius)
for i in range(-ranges[0]-1, ranges[0]+1):
start = uc.to_cartesian([i+0.5, -ranges[1]-0.5, 0])
end = uc.to_cartesian([i+0.5, ranges[1]+0.5, 0])
pylab.gca().add_artist(Line2D([start[0], end[0]], [start[1], end[1]], color="k", linewidth=1))
for i in range(-ranges[1]-1, ranges[1]+1):
start = uc.to_cartesian([-ranges[0]-0.5, i+0.5, 0])
end = uc.to_cartesian([ranges[0]+0.5, i+0.5, 0])
pylab.gca().add_artist(Line2D([start[0], end[0]], [start[1], end[1]], color="k", linewidth=1))
for i in range(-ranges[0], ranges[0]+1):
start = uc.to_cartesian([i, -ranges[1]-0.5, 0])
end = uc.to_cartesian([i, ranges[1]+0.5, 0])
pylab.gca().add_artist(Line2D([start[0], end[0]], [start[1], end[1]], color="k", linewidth=0.5, linestyle="--"))
for i in range(-ranges[1], ranges[1]+1):
start = uc.to_cartesian([-ranges[0]-0.5, i, 0])
end = uc.to_cartesian([ranges[0]+0.5, i, 0])
pylab.gca().add_artist(Line2D([start[0], end[0]], [start[1], end[1]], color="k", linewidth=0.5, linestyle="--"))
for i0,i1,i2 in indexes:
if i2 != 0:
continue
corners = uc.to_cartesian(np.array([
[i0-0.5, i1-0.5, 0.0],
[i0-0.5, i1+0.5, 0.0],
[i0+0.5, i1+0.5, 0.0],
[i0+0.5, i1-0.5, 0.0],
]))
pylab.gca().add_artist(Polygon(corners[:,:2], fill=True, ec='none', fc='r', alpha=0.5))
corners = uc.to_cartesian(np.array([
[-ranges[0]-0.5, -ranges[1]-0.5, 0.0],
[-ranges[0]-0.5, +ranges[1]+0.5, 0.0],
[+ranges[0]+0.5, +ranges[1]+0.5, 0.0],
[+ranges[0]+0.5, -ranges[1]-0.5, 0.0],
]))
pylab.xlim(1.1*corners[:,:2].min(), 1.1*corners[:,:2].max())
pylab.ylim(1.1*corners[:,:2].min(), 1.1*corners[:,:2].max())
#pylab.xlim(-1.5*radius, 1.5*radius)
#pylab.ylim(-1.5*radius, 1.5*radius)
#pylab.savefig("radius_indexes_2d.png")
def test_div(self):
for i in range(20):
uc0 = get_random_uc(num_active=2)
x = np.random.uniform(1,2,3)
uc1 = uc0/x
self.assertArraysAlmostEqual(uc0.matrix/x, uc1.matrix)
self.assertArraysEqual(uc0.active, uc1.active)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,0], uc1.matrix[:,0])
/np.linalg.norm(uc0.matrix[:,0])
/np.linalg.norm(uc1.matrix[:,0]), 1
)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,1], uc1.matrix[:,1])
/np.linalg.norm(uc0.matrix[:,1])
/np.linalg.norm(uc1.matrix[:,1]), 1
)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,2], uc1.matrix[:,2])
/np.linalg.norm(uc0.matrix[:,2])
/np.linalg.norm(uc1.matrix[:,2]), 1
)
def test_mul(self):
for i in range(20):
uc0 = get_random_uc(num_active=2)
x = np.random.uniform(1,2,3)
uc1 = uc0*x
self.assertArraysAlmostEqual(uc0.matrix*x, uc1.matrix)
self.assertArraysEqual(uc0.active, uc1.active)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,0], uc1.matrix[:,0])
/np.linalg.norm(uc0.matrix[:,0])
/np.linalg.norm(uc1.matrix[:,0]), 1
)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,1], uc1.matrix[:,1])
/np.linalg.norm(uc0.matrix[:,1])
/np.linalg.norm(uc1.matrix[:,1]), 1
)
self.assertAlmostEqual(
np.dot(uc0.matrix[:,2], uc1.matrix[:,2])
/np.linalg.norm(uc0.matrix[:,2])
/np.linalg.norm(uc1.matrix[:,2]), 1
)
def test_volume(self):
matrix = np.array([[10,0,0],[0,10,0],[0,0,10]], float)
self.assertAlmostEqual(UnitCell(matrix, np.array([False,False,False])).volume, -1)
self.assertAlmostEqual(UnitCell(matrix, np.array([True,False,False])).volume, 10)
self.assertAlmostEqual(UnitCell(matrix, np.array([True,True,False])).volume, 100)
self.assertAlmostEqual(UnitCell(matrix, np.array([True,True,True])).volume, 1000)
def test_alignment_a(self):
matrix = np.array([[10,10,0],[-10,10,0],[0,0,10]], float)
uc = UnitCell(matrix)
r = uc.alignment_a
sh = np.sqrt(0.5)
expected_r = np.array([
[sh, -sh, 0],
[sh, sh, 0],
[0, 0, 1],
], float)
self.assertArraysAlmostEqual(r.r, expected_r)
uc = r*uc
expected_matrix = np.array([
[10/sh, 0, 0],
[0, 10/sh, 0],
[0, 0, 10],
])
self.assertArraysAlmostEqual(uc.matrix, expected_matrix)
def test_alignment_c(self):
matrix = np.array([[10,0,0],[0,10,-10],[0,10,10]], float)
uc = UnitCell(matrix)
r = uc.alignment_c
uc = r*uc
sh = np.sqrt(0.5)
expected_matrix = np.array([
[10, 0, 0],
[0, 10/sh, 0],
[0, 0, 10/sh],
])
self.assertArraysAlmostEqual(uc.matrix, expected_matrix)
def test_shortest_vector_aperiodic(self):
unit_cell = UnitCell(np.identity(3, float), np.zeros(3, bool))
shortest = unit_cell.shortest_vector(np.ones(3, float))
expected = np.ones(3, float)
self.assertArraysAlmostEqual(shortest, expected)
def test_ordered(self):
for i in range(10):
uc0 = get_random_uc(num_active=2)
uc1 = uc0.ordered
self.assertAlmostEqual(uc0.volume, uc1.volume)
self.assertEqual(uc0.active.sum(), uc1.active.sum())
assert uc1.active[0] >= uc1.active[1]
assert uc1.active[1] >= uc1.active[2]
for i1, i0 in enumerate(uc0.active_inactive[0]):
self.assertArraysAlmostEqual(uc0.matrix[:,i0], uc1.matrix[:,i1])
self.assertEqual(uc0.active[i0], uc1.active[i1])
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/font_table_ttf.py | 3 | 1771 | #!/usr/bin/env python
# -*- noplot -*-
"""
matplotlib has support for freetype fonts. Here's a little example
using the 'table' command to build a font table that shows the glyphs
by character code.
Usage python font_table_ttf.py somefile.ttf
"""
import sys
import os
import matplotlib
from matplotlib.ft2font import FT2Font
from matplotlib.font_manager import FontProperties
from pylab import figure, table, show, axis, title
import six
from six import unichr
# the font table grid
labelc = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F']
labelr = ['00', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'A0', 'B0', 'C0', 'D0', 'E0', 'F0']
if len(sys.argv) > 1:
fontname = sys.argv[1]
else:
fontname = os.path.join(matplotlib.get_data_path(),
'fonts', 'ttf', 'Vera.ttf')
font = FT2Font(fontname)
codes = list(font.get_charmap().items())
codes.sort()
# a 16,16 array of character strings
chars = [['' for c in range(16)] for r in range(16)]
colors = [[(0.95, 0.95, 0.95) for c in range(16)] for r in range(16)]
figure(figsize=(8, 4), dpi=120)
for ccode, glyphind in codes:
if ccode >= 256:
continue
r, c = divmod(ccode, 16)
s = unichr(ccode)
chars[r][c] = s
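# Note (worked example): divmod(ccode, 16) maps a character code to its table
# cell, e.g. code 0x41 ('A') lands in row 4, column 1, matching the '40' row
# and '1' column labels defined above.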
lightgrn = (0.5, 0.8, 0.5)
title(fontname)
tab = table(cellText=chars,
rowLabels=labelr,
colLabels=labelc,
rowColours=[lightgrn]*16,
colColours=[lightgrn]*16,
cellColours=colors,
cellLoc='center',
loc='upper left')
for key, cell in tab.get_celld().items():
row, col = key
if row > 0 and col > 0:
cell.set_text_props(fontproperties=FontProperties(fname=fontname))
axis('off')
show()
| mit |
Odingod/mne-python | mne/decoding/tests/test_csp.py | 6 | 3498 | # Author: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP
from mne.utils import requires_sklearn
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
def test_csp():
"""Test Common Spatial Patterns algorithm on epochs
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
csp = CSP(n_components=n_components)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
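# Hedged usage sketch (illustration only, mirroring the calls tested above):
# csp = CSP(n_components=3)
# features = csp.fit_transform(epochs.get_data(), epochs.events[:, -1])
# returns one row per epoch and one column per CSP component.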
@requires_sklearn
def test_regularized_csp():
"""Test Common Spatial Patterns algorithm using regularized covariance
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
reg_cov = [None, 0.05, 'lws', 'oas']
for reg in reg_cov:
csp = CSP(n_components=n_components, reg=reg)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).
transform(epochs_data), X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
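# Illustration, using the values from the test above: with y = [1, 2, 1, 1],
# strategy="most_frequent" always predicts 1, while strategy="prior" exposes
# class_prior_ = [0.75, 0.25] through predict_proba.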
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
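# quantile=0.5 should reproduce the column-wise median computed above, and
# quantile=0.8 the 80th percentile; both are checked on the training and the
# held-out inputs below.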
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# multioutput target: supply one constant per output column of the 2d y array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| mit |
paulmueller/PyCorrFit | tests/test_simple.py | 2 | 1310 | import numpy as np
from pycorrfit.correlation import Correlation
from pycorrfit.fit import Fit
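# The test below builds a synthetic correlation curve from the model's default
# parameters plus small uniform noise, perturbs two of those parameters, and
# checks that Fit() recovers the original values within an absolute tolerance
# of 0.010.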
def create_corr():
corr = Correlation()
tau = np.exp(np.linspace(np.log(1e-3), np.log(1e6), 10))
data = corr.fit_model(corr.fit_parameters, tau)
noise = (np.random.random(data.shape[0])-.5)*.0005
data += noise
corr.correlation = np.dstack((tau, data))[0]
return corr
def test_simple_corr():
corr = create_corr()
oldparms = corr.fit_parameters.copy()
temp = corr.fit_parameters
temp[0] *= 2
temp[-1] *= .1
Fit(corr)
res = oldparms - corr.fit_parameters
assert np.allclose(res, np.zeros_like(res), atol=0.010)
if __name__ == "__main__":
import matplotlib.pylab as plt
corr = create_corr()
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_xscale("log")
ax2.set_xscale("log")
print(corr.fit_parameters)
temp = corr.fit_parameters
temp[0] *= 2
temp[-1] *= .1
ax1.plot(corr.correlation_fit[:, 0], corr.correlation_fit[:, 1])
ax1.plot(corr.modeled_fit[:, 0], corr.modeled_fit[:, 1])
print(corr.fit_parameters)
Fit(corr)
print(corr.fit_parameters)
ax2.plot(corr.correlation_fit[:, 0], corr.correlation_fit[:, 1])
ax2.plot(corr.modeled_fit[:, 0], corr.modeled_fit[:, 1])
plt.show()
| gpl-2.0 |
aayushidwivedi01/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_binary_classification_test.py | 13 | 9995 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" tests multiclass classification metrics"""
import unittest
from sparktkregtests.lib import sparktk_test
class BinaryClassificationMetrics(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Tests binary classification"""
super(BinaryClassificationMetrics, self).setUp()
self.dataset = [("blue", 1, 0, 0),
("blue", 3, 1, 0),
("green", 1, 0, 0),
("green", 0, 1, 0)]
self.schema = [("a", str),
("b", int),
("labels", int),
("predictions", int)]
self.frame = self.context.frame.create(self.dataset,
schema=self.schema)
def test_binary_classification_metrics(self):
"""test binary classification metrics with normal data"""
# call the binary classification metrics function
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
1)
# get the confusion matrix values
conf_matrix = class_metrics.confusion_matrix.values
# labeling each of the cells in our confusion matrix
# makes this easier for me to read
# the confusion matrix should look something like this:
# predicted pos predicted neg
# actual pos [0][0] [0][1]
# actual neg [1][0] [1][1]
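# Illustrative sketch with hypothetical labels (not this fixture's data):
# labels      [1, 0, 1, 0, 0]
# predictions [1, 0, 0, 0, 1]
# would give TP=1, FN=1 on the first row and FP=1, TN=2 on the second.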
true_pos = conf_matrix[0][0]
false_neg = conf_matrix[0][1]
false_pos = conf_matrix[1][0]
true_neg = conf_matrix[1][1]
# the total number of predictions, total number pos and neg
total_pos = true_pos + false_neg
total_neg = true_neg + false_pos
total = total_pos + total_neg
# recall is defined in the docs as the total number of true pos
# results divided by the false negatives + pos
recall = true_pos / (false_neg + true_pos)
# from the docs, precision = true pos / false pos + true pos
precision = true_pos / (false_pos + true_pos)
# f-measure with beta=1 (F1) is the harmonic mean of precision and recall
f_measure = 2 * (recall * precision) / (recall + precision)
# according to the documentation the accuracy
# is defined as the total correct predictions divided by the
# total number of predictions
accuracy = float(true_pos + true_neg) / float(total)
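# Worked illustration with hypothetical counts: TP=1, FN=1, FP=1, TN=2 give
# recall = 1/2, precision = 1/2 and accuracy = 3/5. The assertions below compare
# the locally recomputed values against sparktk's own metrics.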
pos_count = 0
pandas_frame = self.frame.to_pandas()
# calculate the number of pos results and neg results in the data
for index, row in pandas_frame.iterrows():
if row["labels"] is 1:
pos_count = pos_count + 1
neg_count = total - pos_count
# finally we compare our results with sparktk's
self.assertAlmostEqual(class_metrics.recall, recall)
self.assertAlmostEqual(class_metrics.precision, precision)
self.assertAlmostEqual(class_metrics.f_measure, f_measure)
self.assertAlmostEqual(class_metrics.accuracy, accuracy)
self.assertEqual(total_pos, pos_count)
self.assertEqual(total_neg, neg_count)
def test_binary_classification_metrics_bad_beta(self):
"""Test binary classification metrics with negative beta"""
# should throw an error because beta must be >0
with self.assertRaisesRegexp(Exception, "greater than or equal to 0"):
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta=-1)
def test_binary_classification_metrics_valid_beta(self):
"""test binary class metrics with a valid value for beta"""
# this is a valid value for beta so this should not throw an error
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta=2)
def test_binary_classification_matrics_with_invalid_beta_type(self):
"""Test binary class metrics with a beta of invalid type"""
with self.assertRaisesRegexp(Exception, "could not convert string to float"):
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta="bla")
def test_binary_classification_metrics_with_invalid_pos_label(self):
"""Test binary class metrics with a pos label that does not exist"""
# should not error but should return no pos predictions
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
"bla",
1)
# assert that no positive results were found since
# there are no labels in the data with "bla"
conf_matrix = class_metrics.confusion_matrix.values
# assert no predicted pos actual pos
self.assertEqual(conf_matrix[0][0], 0)
# assert no actual pos predicted neg
self.assertEqual(conf_matrix[1][0], 0)
def test_binary_classification_metrics_with_frequency_col(self):
"""test binay class metrics with a frequency column"""
dataset = [("blue", 1, 0, 0, 1),
("blue", 3, 1, 0, 1),
("green", 1, 0, 0, 3),
("green", 0, 1, 0, 1)]
schema = [("a", str),
("b", int),
("labels", int),
("predictions", int),
("frequency", int)]
frame = self.context.frame.create(dataset, schema=schema)
class_metrics = frame.binary_classification_metrics("labels",
"predictions",
1,
1,
frequency_column="frequency")
conf_matrix = class_metrics.confusion_matrix.values
true_pos = conf_matrix[0][0]
false_neg = conf_matrix[0][1]
false_pos = conf_matrix[1][0]
true_neg = conf_matrix[1][1]
total_pos = true_pos + false_neg
total_neg = true_neg + false_pos
total = total_pos + total_neg
# these calculations use the definitions from the docs
recall = true_pos / (false_neg + true_pos)
precision = true_pos / (false_pos + true_pos)
f_measure = 2 * (recall * precision) / (recall + precision)
accuracy = float(true_pos + true_neg) / float(total)
pos_count = 0
pandas_frame = self.frame.to_pandas()
# calculate the number of pos results and neg results in the data
for index, row in pandas_frame.iterrows():
if row["labels"] is 1:
pos_count = pos_count + 1
neg_count = total - pos_count
# finally we check that our values match sparktk's
self.assertAlmostEqual(class_metrics.recall, recall)
self.assertAlmostEqual(class_metrics.precision, precision)
self.assertAlmostEqual(class_metrics.f_measure, f_measure)
self.assertAlmostEqual(class_metrics.accuracy, accuracy)
self.assertEqual(total_pos, pos_count)
self.assertEqual(total_neg, neg_count)
def test_binary_classification_metrics_with_invalid_frequency_col(self):
"""test binary class metrics with a frequency col of invalid type"""
dataset = [("blue", 1, 0, 0, "bla"),
("blue", 3, 1, 0, "bla"),
("green", 1, 0, 0, "bla"),
("green", 0, 1, 0, "bla")]
schema = [("a", str),
("b", int),
("labels", int),
("predictions", int),
("frequency", str)]
frame = self.context.frame.create(dataset, schema=schema)
# this should throw an error because the frequency col
# we provided is of type str but should be of type int
with self.assertRaisesRegexp(Exception, "NumberFormatException"):
class_metrics = frame.binary_classification_metrics("labels",
"predictions",
1,
1,
frequency_column="frequency")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
saguziel/incubator-airflow | setup.py | 1 | 9154 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
logger = logging.getLogger(__name__)
# The canonical version string is defined in airflow/version.py (exposed as airflow.__version__)
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
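# write_version() stores the suffix computed by git_version() in
# airflow/git_version; that file is shipped via package_data below, presumably
# so an installed build can trace the exact commit it was built from.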
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-parameterized',
'nose-timer',
'rednose'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
write_version()
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.12',
'funcsigs==1.0.0',
'future>=0.15.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
dhruv13J/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
hainm/statsmodels | statsmodels/sandbox/examples/example_gam_0.py | 33 | 4574 | '''first examples for gam and PolynomialSmoother used for debugging
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Note: uncomment plt.show() to display graphs
'''
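# Rough flow: build two smooth univariate effects f1(x1) and f2(x2), combine
# them into z, draw Bernoulli (example 2) or Poisson (example 3) responses
# through the family link, fit AdditiveModel (example 1) or GAM (examples 2-3),
# and plot the estimated smoothers against the true components.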
example = 2 #3 # 1,2 or 3
import numpy as np
from statsmodels.compat.python import zip
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
#np.random.seed(987654)
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 500
lb, ub = -1., 1. #for Poisson
#lb, ub = -0.75, 2 #0.75 #for Binomial
x1 = R.uniform(lb, ub, nobs) #R.standard_normal(nobs)
x1 = np.linspace(lb, ub, nobs)
x1.sort()
x2 = R.uniform(lb, ub, nobs) #
#x2 = R.standard_normal(nobs)
x2.sort()
#x2 = np.cos(x2)
x2 = x2 + np.exp(x2/2.)
#x2 = np.log(x2-x2.min()+0.1)
y = 0.5 * R.uniform(lb, ub, nobs) #R.standard_normal((nobs,))
f1 = lambda x1: (2*x1 - 0.5 * x1**2 - 0.75 * x1**3) # + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 - 1* x2**2) # - 0.75 * np.exp(x2))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) + 1 # 0.1
#try this
z = f1(x1) + f2(x2)
#z = demean(z)
z -= np.median(z)
print('z.std()', z.std())
#z = standardize(z) + 0.2
# with standardize I get better values, but I don't know what the true params are
print(z.mean(), z.min(), z.max())
#y += z #noise
y = z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
import scipy.stats, time
if example == 2:
print("binomial")
mod_name = 'Binomial'
f = families.Binomial()
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
mod_name = 'Poisson'
f = families.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
#p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(z)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
if example > 1:
y_pred = m.results.mu# + m.results.alpha#m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name)
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], '.')
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name + ' ' + ii)
counter += 1
# counter = 2
# for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
# #plt.figure()
# plt.subplot(2, 2, counter)
# plt.plot(xx, p, '.')
# plt.plot(xx, yp, 'b-', label='true')
# plt.plot(xx, y_pred, 'r-', label='GAM')
# plt.legend(loc='upper left')
# plt.title('gam.GAM Poisson ' + ii)
# counter += 1
plt.figure()
plt.plot(z, 'b-', label='true' )
plt.plot(np.log(m.results.mu), 'r-', label='GAM')
plt.title('GAM Poisson, raw')
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
##y_pred = m.results.predict(d)
##plt.figure()
##plt.plot(z, p, '.')
##plt.plot(z, yp, 'b-', label='true')
##plt.plot(z, y_pred, 'r-', label='AdditiveModel')
##plt.legend()
##plt.title('gam.AdditiveModel')
#plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |