repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/core/common.py | 7 | 15175 | """
Misc tools for implementing data structures
"""
import sys
import warnings
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
from pandas.types.generic import ABCSeries
from pandas.types.common import _NS_DTYPE
from pandas.types.inference import _iterable_not_string
from pandas.types.missing import isnull
from pandas.api import types
from pandas.types import common
# back-compat of public API
# deprecate these functions
m = sys.modules['pandas.core.common']
for t in [t for t in dir(types) if not t.startswith('_')]:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"import from the public API: "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(types, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# back-compat for non-public functions
# deprecate these functions
for t in ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"These are not longer public API functions, "
"but can be imported from "
"pandas.types.common.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(common, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# deprecate array_equivalent
def array_equivalent(*args, **kwargs):
warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
"is no longer public API", DeprecationWarning, stacklevel=2)
from pandas.types import missing
return missing.array_equivalent(*args, **kwargs)
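# Illustrative usage sketch (hypothetical, not taken from the pandas docs):
# with the shims above, importing a relocated helper through
# ``pandas.core.common`` still works but warns and defers to
# ``pandas.api.types``.
#
#     >>> import warnings
#     >>> import pandas.core.common as com
#     >>> with warnings.catch_warnings(record=True) as w:
#     ...     warnings.simplefilter('always')
#     ...     out = com.is_integer(3)   # wrapped pandas.api.types.is_integer
#     >>> out, issubclass(w[-1].category, DeprecationWarning)
#     (True, True)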
class PandasError(Exception):
pass
class PerformanceWarning(Warning):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
class UnsupportedFunctionCall(ValueError):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
return ("This method must be defined in the concrete class of %s" %
self.class_instance.__class__.__name__)
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
        The non-string sequence to flatten
Notes
-----
    This doesn't consider strings to be sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
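# Usage sketch for ``flatten`` (assumes only the generator above): nested
# sequences are walked recursively, while strings are yielded as atoms.
#
#     >>> list(flatten([1, [2, (3, 4)], 'ab']))
#     [1, 2, 3, 4, 'ab']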
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, (np.datetime64, datetime)):
value = tslib.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslib.Timedelta(value)
return value
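# Boxing sketch (uses only the imports at the top of this module): scalars
# that look datetime-like come back as pandas Timestamp/Timedelta objects,
# anything else passes through unchanged.
#
#     >>> _maybe_box_datetimelike(datetime(2016, 1, 1))    # Timestamp('2016-01-01 00:00:00')
#     >>> _maybe_box_datetimelike(np.timedelta64(1, 'D'))  # Timedelta('1 days 00:00:00')
#     >>> _maybe_box_datetimelike(5)                       # 5, unchanged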
_values_from_object = lib.values_from_object
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
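# Behaviour sketch for ``is_bool_indexer`` (numpy only): boolean arrays and
# boolean lists qualify, object arrays containing NA raise, and anything else
# is simply not a boolean indexer.
#
#     >>> is_bool_indexer(np.array([True, False]))                  # True
#     >>> is_bool_indexer([True, False, True])                      # True
#     >>> is_bool_indexer(np.array([1, 2, 3]))                      # False
#     >>> is_bool_indexer(np.array([True, np.nan], dtype=object))   # ValueError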
def _default_index(n):
from pandas.core.index import RangeIndex
return RangeIndex(0, n, name=None)
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _not_none(*args):
return (arg for arg in args if arg is not None)
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _count_not_none(*args):
return sum(x is not None for x in args)
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def iterpairs(seq):
"""
Parameters
----------
seq : sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
        if not val:  # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
    Does not require the sequence elements to be sorted by keys;
    however, it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
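# Minimal usage sketch of the dict-based ``groupby`` above; unlike
# ``itertools.groupby`` the input does not need to be pre-sorted by key.
#
#     >>> g = groupby(['apple', 'banana', 'avocado'], key=lambda s: s[0])
#     >>> sorted(g.items())
#     [('a', ['apple', 'avocado']), ('b', ['banana'])]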
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
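# Why the helper above exists (numpy only): plain ``np.asarray`` turns a list
# of same-length tuples into a 2-D array, silently losing the tuples.
#
#     >>> np.asarray([(1, 2), (3, 4)]).shape           # (2, 2) -- tuples flattened
#     >>> _asarray_tuplesafe([(1, 2), (3, 4)]).shape   # (2,)   -- 1-D array of tuples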
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def is_full_slice(obj, l):
""" we have a full length slice """
return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
obj.step is None)
def _get_callable_name(obj):
# typical case has name
if hasattr(obj, '__name__'):
return getattr(obj, '__name__')
# some objects don't; could recurse
if isinstance(obj, partial):
return _get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, '__call__'):
return obj.__class__.__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
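# Resolution-order sketch for ``_get_callable_name`` (standard library only):
#
#     >>> _get_callable_name(len)                    # 'len'  -- via __name__
#     >>> _get_callable_name(partial(max, key=abs))  # 'max'  -- unwraps functools.partial
#     >>> _get_callable_name('not callable')         # None   -- nothing sensible to report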
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
import pandas.tslib as tslib
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def _dict_compat(d):
"""
    Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dicts
Parameters
----------
d: dict like object
Returns
-------
dict
"""
return dict((_maybe_box_datetimelike(key), value)
for key, value in iteritems(d))
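# Conversion sketch (uses only the helpers above): datetime-like keys are
# boxed to Timestamps, all other keys are left untouched.
#
#     >>> d = _dict_compat({datetime(2016, 1, 1): 'a', 'x': 'b'})
#     >>> sorted(type(k).__name__ for k in d)
#     ['Timestamp', 'str']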
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
    check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if types.is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
| apache-2.0 |
jakevdp/Mmani | megaman/embedding/tests/test_lle.py | 4 | 4114 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import sys
import numpy as np
import scipy as sp
import scipy.sparse as sparse
from scipy.spatial.distance import squareform, pdist
from itertools import product
from numpy.testing import assert_array_almost_equal
from sklearn import manifold, datasets
from sklearn.neighbors import NearestNeighbors
import megaman.embedding.locally_linear as lle
import megaman.geometry.geometry as geom
from megaman.utils.eigendecomp import EIGEN_SOLVERS
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
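# Quick self-check sketch for the helper above (names are local to this
# sketch): embeddings are only defined up to a per-column sign, so negating a
# column of B should still compare equal to A.
#
#     >>> A = np.random.RandomState(0).randn(6, 2)
#     >>> B = A.copy(); B[:, 1] *= -1
#     >>> _check_with_col_sign_flipping(A, B, tol=1e-10)         # True
#     >>> _check_with_col_sign_flipping(A, B + 1.0, tol=1e-10)   # False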
def test_lle_with_sklearn():
N = 10
X, color = datasets.samples_generator.make_s_curve(N, random_state=0)
n_components = 2
n_neighbors = 3
knn = NearestNeighbors(n_neighbors + 1).fit(X)
G = geom.Geometry()
G.set_data_matrix(X)
G.set_adjacency_matrix(knn.kneighbors_graph(X, mode = 'distance'))
sk_Y_lle = manifold.LocallyLinearEmbedding(n_neighbors, n_components, method = 'standard').fit_transform(X)
(mm_Y_lle, err) = lle.locally_linear_embedding(G, n_components)
assert(_check_with_col_sign_flipping(sk_Y_lle, mm_Y_lle, 0.05))
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
distance_matrix = squareform(pdist(X))
A = lle.barycenter_graph(distance_matrix, X)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert(np.linalg.norm(pred - X) / X.shape[0] < 1)
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 20 because the tests pass.
rng = np.random.RandomState(20)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
G = geom.Geometry(adjacency_kwds = {'radius':3})
G.set_data_matrix(X)
tol = 0.1
distance_matrix = G.compute_adjacency_matrix()
N = lle.barycenter_graph(distance_matrix, X).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X, 'fro')
assert(reconstruction_error < tol)
for eigen_solver in EIGEN_SOLVERS:
clf = lle.LocallyLinearEmbedding(n_components = n_components, geom = G,
eigen_solver = eigen_solver, random_state = rng)
clf.fit(X)
assert(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert(reconstruction_error < tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
G = geom.Geometry(adjacency_kwds = {'radius':3})
G.set_data_matrix(X)
distance_matrix = G.compute_adjacency_matrix()
tol = 1.5
N = lle.barycenter_graph(distance_matrix, X).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X)
assert(reconstruction_error < tol)
for eigen_solver in EIGEN_SOLVERS:
clf = lle.LocallyLinearEmbedding(n_components = n_components, geom = G,
eigen_solver = eigen_solver, random_state = rng)
clf.fit(X)
assert(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert(reconstruction_error < tol)
| bsd-2-clause |
mixturemodel-flow/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 58 | 9375 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
test.main()
| apache-2.0 |
davidsamu/seal | seal/plot/pquality.py | 1 | 8091 | # -*- coding: utf-8 -*-
"""
Functions to plot quality metrics of units after spike sorting.
@author: David Samu
"""
import numpy as np
import pandas as pd
from quantities import us
from seal.plot import putil, pplot, pwaveform
from seal.quality import test_sorting
from seal.util import util
# %% Plot quality metrics.
def plot_qm(u, bs_stats, stab_prd_res, prd_inc, tr_inc, spk_inc,
add_lbls=False, ftempl=None, fig=None, sps=None):
"""Plot quality metrics related figures."""
# Init values.
waveforms = np.array(u.Waveforms)
wavetime = u.Waveforms.columns * us
spk_times = np.array(u.SpikeParams['time'], dtype=float)
base_rate = u.QualityMetrics['baseline']
# Minimum and maximum gain.
gmin = u.SessParams['minV']
gmax = u.SessParams['maxV']
# %% Init plots.
# Disable inline plotting to prevent memory leak.
putil.inline_off()
# Init figure and gridspec.
fig = putil.figure(fig)
if sps is None:
sps = putil.gridspec(1, 1)[0]
ogsp = putil.embed_gsp(sps, 2, 1, height_ratios=[0.02, 1])
info_sps, qm_sps = ogsp[0], ogsp[1]
# Info header.
info_ax = fig.add_subplot(info_sps)
putil.hide_axes(info_ax)
title = putil.get_unit_info_title(u)
putil.set_labels(ax=info_ax, title=title, ytitle=0.80)
# Create axes.
gsp = putil.embed_gsp(qm_sps, 3, 2, wspace=0.3, hspace=0.4)
ax_wf_inc, ax_wf_exc = [fig.add_subplot(gsp[0, i]) for i in (0, 1)]
ax_wf_amp, ax_wf_dur = [fig.add_subplot(gsp[1, i]) for i in (0, 1)]
ax_amp_dur, ax_rate = [fig.add_subplot(gsp[2, i]) for i in (0, 1)]
# Trial markers.
trial_starts, trial_stops = u.TrData.TrialStart, u.TrData.TrialStop
tr_markers = pd.DataFrame({'time': trial_starts[9::10]})
tr_markers['label'] = [str(itr+1) if i % 2 else ''
for i, itr in enumerate(tr_markers.index)]
# Common variables, limits and labels.
WF_T_START = test_sorting.WF_T_START
spk_t = u.SessParams.sampl_prd * (np.arange(waveforms.shape[1])-WF_T_START)
ses_t_lim = test_sorting.get_start_stop_times(spk_times, trial_starts,
trial_stops)
ss, sa = 1.0, 0.8 # marker size and alpha on scatter plot
    # Color spikes by their occurrence over session time.
my_cmap = putil.get_cmap('jet')
spk_cols = np.tile(np.array([.25, .25, .25, .25]), (len(spk_times), 1))
if np.any(spk_inc): # check if there is any spike included
spk_t_inc = np.array(spk_times[spk_inc])
tmin, tmax = float(spk_times.min()), float(spk_times.max())
spk_cols[spk_inc, :] = my_cmap((spk_t_inc-tmin) / (tmax-tmin))
# Put excluded trials to the front, and randomise order of included trials
# so later spikes don't systematically cover earlier ones.
spk_order = np.hstack((np.where(np.invert(spk_inc))[0],
np.random.permutation(np.where(spk_inc)[0])))
# Common labels for plots
ses_t_lab = 'Recording time (s)'
# %% Waveform shape analysis.
# Plot included and excluded waveforms on different axes.
    # Color included by occurrence in session time to help detect drifts.
s_waveforms, s_spk_cols = waveforms[spk_order, :], spk_cols[spk_order]
wf_t_lim, glim = [min(spk_t), max(spk_t)], [gmin, gmax]
wf_t_lab, volt_lab = 'WF time ($\mu$s)', 'Voltage'
for st in ('Included', 'Excluded'):
ax = ax_wf_inc if st == 'Included' else ax_wf_exc
spk_idx = spk_inc if st == 'Included' else np.invert(spk_inc)
tr_idx = tr_inc if st == 'Included' else np.invert(tr_inc)
nspsk, ntrs = sum(spk_idx), sum(tr_idx)
title = '{} WFs, {} spikes, {} trials'.format(st, nspsk, ntrs)
# Select waveforms and colors.
rand_spk_idx = spk_idx[spk_order]
wfs = s_waveforms[rand_spk_idx, :]
cols = s_spk_cols[rand_spk_idx]
# Plot waveforms.
xlab, ylab = (wf_t_lab, volt_lab) if add_lbls else (None, None)
pwaveform.plot_wfs(wfs, spk_t, cols=cols, lw=0.1, alpha=0.05,
xlim=wf_t_lim, ylim=glim, title=title,
xlab=xlab, ylab=ylab, ax=ax)
# %% Waveform summary metrics.
# Init data.
wf_amp_all = u.SpikeParams['amplitude']
wf_amp_inc = wf_amp_all[spk_inc]
wf_dur_all = u.SpikeParams['duration']
wf_dur_inc = wf_dur_all[spk_inc]
# Set common limits and labels.
dur_lim = [0, wavetime[-2]-wavetime[WF_T_START]] # same across units
glim = max(wf_amp_all.max(), gmax-gmin)
amp_lim = [0, glim]
amp_lab = 'Amplitude'
dur_lab = 'Duration ($\mu$s)'
# Waveform amplitude across session time.
m_amp, sd_amp = wf_amp_inc.mean(), wf_amp_inc.std()
title = 'WF amplitude: {:.1f} $\pm$ {:.1f}'.format(m_amp, sd_amp)
xlab, ylab = (ses_t_lab, amp_lab) if add_lbls else (None, None)
pplot.scatter(spk_times, wf_amp_all, spk_inc, c='m', bc='grey', s=ss,
xlab=xlab, ylab=ylab, xlim=ses_t_lim, ylim=amp_lim,
edgecolors='', alpha=sa, id_line=False, title=title,
ax=ax_wf_amp)
# Waveform duration across session time.
mdur, sdur = wf_dur_inc.mean(), wf_dur_inc.std()
title = 'WF duration: {:.1f} $\pm$ {:.1f} $\mu$s'.format(mdur, sdur)
xlab, ylab = (ses_t_lab, dur_lab) if add_lbls else (None, None)
pplot.scatter(spk_times, wf_dur_all, spk_inc, c='c', bc='grey', s=ss,
xlab=xlab, ylab=ylab, xlim=ses_t_lim, ylim=dur_lim,
edgecolors='', alpha=sa, id_line=False, title=title,
ax=ax_wf_dur)
# Waveform duration against amplitude.
title = 'WF duration - amplitude'
xlab, ylab = (dur_lab, amp_lab) if add_lbls else (None, None)
pplot.scatter(wf_dur_all[spk_order], wf_amp_all[spk_order],
c=spk_cols[spk_order], s=ss, xlab=xlab, ylab=ylab,
xlim=dur_lim, ylim=amp_lim, edgecolors='', alpha=sa,
id_line=False, title=title, ax=ax_amp_dur)
# %% Firing rate.
tmean = np.array(bs_stats['tmean'])
rmean = util.remove_dim_from_series(bs_stats['rate'])
prd_tstart, prd_tstop = stab_prd_res['tstart'], stab_prd_res['tstop']
# Color segments depending on whether they are included / excluded.
def plot_periods(v, color, ax):
# Plot line segments.
for i in range(len(prd_inc[:-1])):
col = color if prd_inc[i] and prd_inc[i+1] else 'grey'
x, y = [(tmean[i], tmean[i+1]), (v[i], v[i+1])]
ax.plot(x, y, color=col)
# Plot line points.
for i in range(len(prd_inc)):
col = color if prd_inc[i] else 'grey'
x, y = [tmean[i], v[i]]
ax.plot(x, y, color=col, marker='o',
markersize=3, markeredgecolor=col)
# Firing rate over session time.
title = 'Baseline rate: {:.1f} spike/s'.format(float(base_rate))
xlab, ylab = (ses_t_lab, putil.FR_lbl) if add_lbls else (None, None)
ylim = [0, 1.25*np.max(rmean)]
plot_periods(rmean, 'b', ax_rate)
pplot.lines([], [], c='b', xlim=ses_t_lim, ylim=ylim, title=title,
xlab=xlab, ylab=ylab, ax=ax_rate)
# Trial markers.
putil.plot_events(tr_markers, lw=0.5, ls='--', alpha=0.35, y_lbl=0.92,
ax=ax_rate)
# Excluded periods.
excl_prds = []
tstart, tstop = ses_t_lim
if tstart != prd_tstart:
excl_prds.append(('beg', tstart, prd_tstart))
if tstop != prd_tstop:
excl_prds.append(('end', prd_tstop, tstop))
putil.plot_periods(excl_prds, ymax=0.92, ax=ax_rate)
# %% Post-formatting.
# Maximize number of ticks on recording time axes to prevent covering.
for ax in (ax_wf_amp, ax_wf_dur, ax_rate):
putil.set_max_n_ticks(ax, 6, 'x')
# %% Save figure.
if ftempl is not None:
fname = ftempl.format(u.name_to_fname())
putil.save_fig(fname, fig, title, rect_height=0.92)
putil.inline_on()
return [ax_wf_inc, ax_wf_exc], ax_wf_amp, ax_wf_dur, ax_amp_dur, ax_rate
| gpl-3.0 |
ARudiuk/mne-python | mne/tests/test_source_estimate.py | 2 | 30296 | from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import testing
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces, MixedSourceEstimate, read_source_estimate,
morph_data, extract_label_time_course,
spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
spatial_inter_hemi_connectivity)
from mne.source_estimate import (compute_morph_matrix, grade_to_vertices,
grade_to_tris)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_h5py, run_tests_if_main, slow_test)
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_src = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_src_3 = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-4-src.fif')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_smorph = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
rng = np.random.RandomState(0)
@testing.requires_testing_data
def test_aaspatial_inter_hemi_connectivity():
"""Test spatial connectivity between hemispheres"""
# trivial cases
conn = spatial_inter_hemi_connectivity(fname_src_3, 5e-6)
assert_equal(conn.data.size, 0)
conn = spatial_inter_hemi_connectivity(fname_src_3, 5e6)
assert_equal(conn.data.size, np.prod(conn.shape) // 2)
# actually interesting case (1cm), should be between 2 and 10% of verts
src = read_source_spaces(fname_src_3)
conn = spatial_inter_hemi_connectivity(src, 10e-3)
conn = conn.tocsr()
n_src = conn.shape[0]
assert_true(n_src * 0.02 < conn.data.size < n_src * 0.10)
assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
c = (conn.T + conn) / 2. - conn
c.eliminate_zeros()
assert_equal(c.data.size, 0)
# check locations
upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
assert_equal(upper_right.sum(), conn.sum() // 2)
good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
'G_cuneus']
for hi, hemi in enumerate(('lh', 'rh')):
has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right,
axis=1 - hi))[0]]
labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
subjects_dir=subjects_dir)
use_labels = [l.name[:-3] for l in labels
if np.in1d(l.vertices, has_neighbors).any()]
assert_true(set(use_labels) - set(good_labels) == set())
@slow_test
@testing.requires_testing_data
def test_volume_stc():
"""Test volume STCs
"""
tempdir = _TempDir()
N = 100
data = np.arange(N)[:, np.newaxis]
datas = [data, data, np.arange(2)[:, np.newaxis]]
vertno = np.arange(N)
vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
vertno_reads = [vertno, vertno, np.arange(2)]
for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
stc = VolSourceEstimate(data, vertno, 0, 1)
fname_temp = op.join(tempdir, 'temp-vl.stc')
stc_new = stc
for _ in range(2):
stc_new.save(fname_temp)
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(vertno_read, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# now let's actually read a MNE-C processed file
stc = read_source_estimate(fname_vol, 'sample')
assert_true(isinstance(stc, VolSourceEstimate))
assert_true('sample' in repr(stc))
stc_new = stc
assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
for _ in range(2):
fname_temp = op.join(tempdir, 'temp-vol.w')
stc_new.save(fname_temp, ftype='w')
stc_new = read_source_estimate(fname_temp)
assert_true(isinstance(stc_new, VolSourceEstimate))
assert_array_equal(stc.vertices, stc_new.vertices)
assert_array_almost_equal(stc.data, stc_new.data)
# save the stc as a nifti file and export
try:
import nibabel as nib
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
src = read_source_spaces(fname_vsrc)
vol_fname = op.join(tempdir, 'stc.nii.gz')
stc.save_as_volume(vol_fname, src,
dest='surf', mri_resolution=False)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == src[0]['shape'] + (len(stc.times),))
with warnings.catch_warnings(record=True): # nib<->numpy
t1_img = nib.load(fname_t1)
stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
dest='mri', mri_resolution=True)
with warnings.catch_warnings(record=True): # nib<->numpy
img = nib.load(vol_fname)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
# export without saving
img = stc.as_volume(src, dest='mri', mri_resolution=True)
assert_true(img.shape == t1_img.shape + (len(stc.times),))
assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
decimal=5)
except ImportError:
print('Save as nifti test skipped, needs NiBabel')
@testing.requires_testing_data
def test_expand():
"""Test stc expansion
"""
stc = read_source_estimate(fname_stc, 'sample')
assert_true('sample' in repr(stc))
labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)
new_label = labels_lh[0] + labels_lh[1]
stc_limited = stc.in_label(new_label)
stc_new = stc_limited.copy()
stc_new.data.fill(0)
for label in labels_lh[:2]:
stc_new += stc.in_label(label).expand(stc_limited.vertices)
assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
# make sure we can't add unless vertno agree
assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
verts = [np.arange(10), np.arange(90)]
return SourceEstimate(np.random.rand(100, n_time), verts, 0, 1e-1, 'foo')
def test_io_stc():
"""Test IO for STC files
"""
tempdir = _TempDir()
stc = _fake_stc()
stc.save(op.join(tempdir, "tmp.stc"))
stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
assert_array_almost_equal(stc.data, stc2.data)
assert_array_almost_equal(stc.tmin, stc2.tmin)
assert_equal(len(stc.vertices), len(stc2.vertices))
for v1, v2 in zip(stc.vertices, stc2.vertices):
assert_array_almost_equal(v1, v2)
assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_h5py
def test_io_stc_h5():
"""Test IO for STC files using HDF5
"""
tempdir = _TempDir()
stc = _fake_stc()
assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
out_name = op.join(tempdir, 'tmp')
stc.save(out_name, ftype='h5')
stc3 = read_source_estimate(out_name)
stc4 = read_source_estimate(out_name + '-stc.h5')
assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
for stc_new in stc3, stc4:
assert_equal(stc_new.subject, stc.subject)
assert_array_equal(stc_new.data, stc.data)
assert_array_equal(stc_new.tmin, stc.tmin)
assert_array_equal(stc_new.tstep, stc.tstep)
assert_equal(len(stc_new.vertices), len(stc.vertices))
for v1, v2 in zip(stc_new.vertices, stc.vertices):
assert_array_equal(v1, v2)
def test_io_w():
"""Test IO for w files
"""
tempdir = _TempDir()
stc = _fake_stc(n_time=1)
w_fname = op.join(tempdir, 'fake')
stc.save(w_fname, ftype='w')
src = read_source_estimate(w_fname)
src.save(op.join(tempdir, 'tmp'), ftype='w')
src2 = read_source_estimate(op.join(tempdir, 'tmp-lh.w'))
assert_array_almost_equal(src.data, src2.data)
assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
def test_stc_arithmetic():
"""Test arithmetic for STC files
"""
stc = _fake_stc()
data = stc.data.copy()
out = list()
for a in [data, stc]:
a = a + a * 3 + 3 * a - a ** 2 / 2
a += a
a -= a
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a /= 2 * a
a *= -a
a += 2
a -= 1
a *= -1
a /= 2
b = 2 + a
b = 2 - a
b = +a
assert_array_equal(b.data, a.data)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
a **= 3
out.append(a)
assert_array_equal(out[0], out[1].data)
assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
stc_mean = stc.mean()
assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@slow_test
@testing.requires_testing_data
def test_stc_methods():
"""Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
"""
stc = read_source_estimate(fname_stc)
# lh_data / rh_data
assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
# bin
bin = stc.bin(.12)
a = np.array((1,), dtype=stc.data.dtype)
a[0] = np.mean(stc.data[0, stc.times < .12])
assert a[0] == bin.data[0, 0]
assert_raises(ValueError, stc.center_of_mass, 'sample')
stc.lh_data[:] = 0
vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
assert_true(hemi == 1)
# XXX Should design a fool-proof test case, but here were the results:
assert_equal(vertex, 124791)
assert_equal(np.round(t, 2), 0.12)
stc = read_source_estimate(fname_stc)
stc.subject = 'sample'
label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
subjects_dir=subjects_dir)[0]
label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
subjects_dir=subjects_dir)[0]
label_both = label_lh + label_rh
for label in (label_lh, label_rh, label_both):
assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
stc_label = stc.in_label(label)
if label.hemi != 'both':
if label.hemi == 'lh':
verts = stc_label.vertices[0]
else: # label.hemi == 'rh':
verts = stc_label.vertices[1]
n_vertices_used = len(label.get_vertices_used(verts))
assert_equal(len(stc_label.data), n_vertices_used)
stc_lh = stc.in_label(label_lh)
assert_raises(ValueError, stc_lh.in_label, label_rh)
label_lh.subject = 'foo'
assert_raises(RuntimeError, stc.in_label, label_lh)
stc_new = deepcopy(stc)
o_sfreq = 1.0 / stc.tstep
# note that using no padding for this STC reduces edge ringing...
stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep / 2)
stc_new.resample(o_sfreq, npad=0)
assert_true(stc_new.data.shape[1] == stc.data.shape[1])
assert_true(stc_new.tstep == stc.tstep)
assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_extract_label_time_course():
"""Test extraction of label time courses from stc
"""
n_stcs = 3
n_times = 50
src = read_inverse_operator(fname_inv)['src']
vertices = [src[0]['vertno'], src[1]['vertno']]
n_verts = len(vertices[0]) + len(vertices[1])
# get some labels
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
labels_rh = read_labels_from_annot('sample', hemi='rh',
subjects_dir=subjects_dir)
labels = list()
labels.extend(labels_lh[:5])
labels.extend(labels_rh[:4])
n_labels = len(labels)
label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
# compute the mean with sign flip
label_means_flipped = np.zeros_like(label_means)
for i, label in enumerate(labels):
label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))
# generate some stc's with known data
stcs = list()
for i in range(n_stcs):
data = np.zeros((n_verts, n_times))
# set the value of the stc within each label
for j, label in enumerate(labels):
if label.hemi == 'lh':
idx = np.intersect1d(vertices[0], label.vertices)
idx = np.searchsorted(vertices[0], idx)
elif label.hemi == 'rh':
idx = np.intersect1d(vertices[1], label.vertices)
idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
data[idx] = label_means[j]
this_stc = SourceEstimate(data, vertices, 0, 1)
stcs.append(this_stc)
# test some invalid inputs
assert_raises(ValueError, extract_label_time_course, stcs, labels,
src, mode='notamode')
# have an empty label
empty_label = labels[0].copy()
empty_label.vertices += 1000000
assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
src, mode='mean')
# but this works:
with warnings.catch_warnings(record=True): # empty label
tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
allow_empty=True)
for arr in tc:
assert_true(arr.shape == (1, n_times))
assert_array_equal(arr, np.zeros((1, n_times)))
# test the different modes
modes = ['mean', 'mean_flip', 'pca_flip', 'max']
for mode in modes:
label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
label_tc_method = [stc.extract_label_time_course(labels, src,
mode=mode) for stc in stcs]
assert_true(len(label_tc) == n_stcs)
assert_true(len(label_tc_method) == n_stcs)
for tc1, tc2 in zip(label_tc, label_tc_method):
assert_true(tc1.shape == (n_labels, n_times))
assert_true(tc2.shape == (n_labels, n_times))
assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
if mode == 'mean':
assert_array_almost_equal(tc1, label_means)
if mode == 'mean_flip':
assert_array_almost_equal(tc1, label_means_flipped)
if mode == 'max':
assert_array_almost_equal(tc1, label_maxs)
# test label with very few vertices (check SVD conditionals)
label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
x = label_sign_flip(label, src)
assert_true(len(x) == 2)
label = Label(vertices=[], hemi='lh')
x = label_sign_flip(label, src)
assert_true(x.size == 0)
@slow_test
@testing.requires_testing_data
def test_morph_data():
"""Test morphing of data
"""
tempdir = _TempDir()
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_array_equal(stc_to.time_as_index([0.09, 0.1], use_rounding=True),
[0, len(stc_to.times) - 1])
assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
subjects_dir=subjects_dir)
stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
# make sure we can specify vertices
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
stc_to2 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=1000,
subjects_dir=subjects_dir)
# make sure we can use different buffer_size
stc_to3 = morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=12, buffer_size=3,
subjects_dir=subjects_dir)
# make sure we get a warning about # of steps
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
morph_data(subject_from, subject_to, stc_from,
grade=vertices_to, smooth=1, buffer_size=3,
subjects_dir=subjects_dir)
assert_equal(len(w), 2)
assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
assert_array_almost_equal(stc_to1.data, stc_to2.data)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
# make sure precomputed morph matrices work
morph_mat = compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=12, subjects_dir=subjects_dir)
stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
assert_array_almost_equal(stc_to1.data, stc_to3.data)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, vertices_to, 'foo')
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed,
subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
vertices_to, morph_mat, subject_from='foo')
# steps warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
smooth=1, subjects_dir=subjects_dir)
assert_equal(len(w), 2)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
# make sure we can fill by morphing
stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
smooth=12, buffer_size=3, subjects_dir=subjects_dir)
assert_true(stc_to5.data.shape[0] == 163842 + 163842)
# Morph sparse data
# Make a sparse stc
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
grade=5, subjects_dir=subjects_dir)
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
subjects_dir=subjects_dir)
assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
assert_equal(stc_to_sparse.subject, subject_to)
assert_equal(stc_from.tmin, stc_from.tmin)
assert_equal(stc_from.tstep, stc_from.tstep)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result"""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
def test_transform_data():
"""Test applying linear (time) transform to data"""
# make up some data
n_sensors, n_vertices, n_times = 10, 20, 4
kernel = rng.randn(n_vertices, n_sensors)
sens_data = rng.randn(n_sensors, n_times)
vertices = np.arange(n_vertices)
data = np.dot(kernel, sens_data)
for idx, tmin_idx, tmax_idx in\
zip([None, np.arange(n_vertices // 2, n_vertices)],
[None, 1], [None, 3]):
if idx is None:
idx_use = slice(None, None)
else:
idx_use = idx
data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
for stc_data in (data, (kernel, sens_data)):
stc = VolSourceEstimate(stc_data, vertices=vertices,
tmin=0., tstep=1.)
stc_data_t = stc.transform_data(_my_trans, idx=idx,
tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
assert_allclose(data_f, stc_data_t)
def test_transform():
"""Test applying linear (time) transform to data"""
# make up some data
n_verts_lh, n_verts_rh, n_times = 10, 10, 10
vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
data = rng.randn(n_verts_lh + n_verts_rh, n_times)
stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
# data_t.ndim > 2 & copy is True
stcs_t = stc.transform(_my_trans, copy=True)
assert_true(isinstance(stcs_t, list))
assert_array_equal(stc.times, stcs_t[0].times)
assert_equal(stc.vertices, stcs_t[0].vertices)
data = np.concatenate((stcs_t[0].data[:, :, None],
stcs_t[1].data[:, :, None]), axis=2)
data_t = stc.transform_data(_my_trans)
assert_array_equal(data, data_t) # check against stc.transform_data()
# data_t.ndim > 2 & copy is False
assert_raises(ValueError, stc.transform, _my_trans, copy=False)
# data_t.ndim = 2 & copy is True
tmp = deepcopy(stc)
stc_t = stc.transform(np.abs, copy=True)
assert_true(isinstance(stc_t, SourceEstimate))
assert_array_equal(stc.data, tmp.data) # xfrm doesn't modify original?
# data_t.ndim = 2 & copy is False
times = np.round(1000 * stc.times)
verts = np.arange(len(stc.lh_vertno),
len(stc.lh_vertno) + len(stc.rh_vertno), 1)
verts_rh = stc.rh_vertno
t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
tmax_idx=t_idx[-1])
stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
assert_true(isinstance(stc, SourceEstimate))
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
assert_true(len(stc.vertices[0]) == 0)
assert_equal(stc.vertices[1], verts_rh)
assert_array_equal(stc.data, data_t)
times = np.round(1000 * stc.times)
t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
stc.transform(np.abs, tmin=0, tmax=250, copy=False)
assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
"""Test spatio-temporal connectivity from triangles"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
connectivity = spatio_temporal_tris_connectivity(tris, 2)
x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
components = stats.cluster_level._get_components(np.array(x), connectivity)
# _get_components works differently now...
old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
new_fmt = np.array(old_fmt)
new_fmt = [np.nonzero(new_fmt == v)[0]
for v in np.unique(new_fmt[new_fmt >= 0])]
assert_true(len(new_fmt), len(components))
for c, n in zip(components, new_fmt):
assert_array_equal(c, n)
@testing.requires_testing_data
def test_spatio_temporal_src_connectivity():
"""Test spatio-temporal connectivity from source spaces"""
tris = np.array([[0, 1, 2], [3, 4, 5]])
src = [dict(), dict()]
connectivity = spatio_temporal_tris_connectivity(tris, 2)
src[0]['use_tris'] = np.array([[0, 1, 2]])
src[1]['use_tris'] = np.array([[0, 1, 2]])
src[0]['vertno'] = np.array([0, 1, 2])
src[1]['vertno'] = np.array([0, 1, 2])
connectivity2 = spatio_temporal_src_connectivity(src, 2)
assert_array_equal(connectivity.todense(), connectivity2.todense())
# add test for dist connectivity
src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
src[0]['vertno'] = [0, 1, 2]
src[1]['vertno'] = [0, 1, 2]
connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
assert_array_equal(connectivity.todense(), connectivity3.todense())
# add test for source space connectivity with omitted vertices
inverse_operator = read_inverse_operator(fname_inv)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_ = inverse_operator['src']
connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
assert len(w) == 1
a = connectivity.shape[0] / 2
b = sum([s['nuse'] for s in inverse_operator['src']])
assert_true(a == b)
assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
"""Test stc Pandas exporter"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = rng.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for stc in [stc_surf, stc_vol]:
assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
df = stc.to_data_frame(index=ind)
assert_true(df.index.names == ind
if isinstance(ind, list) else [ind])
assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data were present as categorical variables
assert_true(all([c in ['time', 'subject'] for c in
df.reset_index().columns][:2]))
def test_get_peak():
"""Test peak getter
"""
n_vert, n_times = 10, 5
vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
data = rng.randn(n_vert, n_times)
stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
subject='sample')
stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
subject='sample')
for ii, stc in enumerate([stc_surf, stc_vol]):
assert_raises(ValueError, stc.get_peak, tmin=-100)
assert_raises(ValueError, stc.get_peak, tmax=90)
assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
vert_idx, time_idx = stc.get_peak()
vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
assert_true(vert_idx in vertno)
assert_true(time_idx in stc.times)
ch_idx, time_idx = stc.get_peak(vert_as_index=True,
time_as_index=True)
assert_true(vert_idx < stc.data.shape[0])
assert_true(time_idx < len(stc.times))
@testing.requires_testing_data
def test_mixed_stc():
"""Test source estimate from mixed source space
"""
N = 90 # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
# make sure error is raised if vertices are not a list of length >= 2
assert_raises(ValueError, MixedSourceEstimate, data=data,
vertices=[np.arange(N)])
stc = MixedSourceEstimate(data, vertno, 0, 1)
vol = read_source_spaces(fname_vsrc)
# make sure error is raised for plotting surface with volume source
assert_raises(ValueError, stc.plot_surface, src=vol)
run_tests_if_main()
| bsd-3-clause |
endolith/scikit-image | doc/examples/features_detection/plot_gabor.py | 21 | 4450 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are
similar to those of the human visual system.
The images are filtered using the real parts of various Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least-squares error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
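# A hypothetical follow-up (not executed here): any new grayscale patch can be
# classified against the stored references with the two helpers defined above;
# ``new_patch`` is a placeholder name, not part of this example.
#
#     new_feats = compute_feats(new_patch, kernels)
#     print(image_names[match(new_feats, ref_feats)])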
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/backends/backend_webagg.py | 7 | 12216 | """
Displays Agg images in the browser, with interactivity
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# The WebAgg backend is divided into two modules:
#
# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
# plot inside of a web application, and communicate in an abstract
# way over a web socket.
#
# - `backend_webagg.py` contains a concrete implementation of a basic
# application, implemented with tornado.
from matplotlib.externals import six
import datetime
import errno
import json
import os
import random
import sys
import socket
import threading
try:
import tornado
except ImportError:
raise RuntimeError("The WebAgg backend requires Tornado.")
import tornado.web
import tornado.ioloop
import tornado.websocket
import matplotlib
from matplotlib import rcParams
from matplotlib import backend_bases
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
from . import backend_webagg_core as core
from .backend_webagg_core import TimerTornado
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasWebAgg(figure)
manager = core.FigureManagerWebAgg(canvas, num)
return manager
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(backend_bases.ShowBase):
def mainloop(self):
WebAggApplication.initialize()
url = "http://127.0.0.1:{port}{prefix}".format(
port=WebAggApplication.port,
prefix=WebAggApplication.url_prefix)
if rcParams['webagg.open_in_browser']:
import webbrowser
webbrowser.open(url)
else:
print("To view figure, visit {0}".format(url))
WebAggApplication.start()
show = Show().mainloop
class ServerThread(threading.Thread):
def run(self):
tornado.ioloop.IOLoop.instance().start()
webagg_server_thread = ServerThread()
class FigureCanvasWebAgg(core.FigureCanvasWebAggCore):
def show(self):
# show the figure window
show()
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
def start_event_loop(self, timeout):
backend_bases.FigureCanvasBase.start_event_loop_default(
self, timeout)
start_event_loop.__doc__ = \
backend_bases.FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
backend_bases.FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = \
backend_bases.FigureCanvasBase.stop_event_loop_default.__doc__
class WebAggApplication(tornado.web.Application):
initialized = False
started = False
class FavIcon(tornado.web.RequestHandler):
def get(self):
image_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'mpl-data', 'images')
self.set_header('Content-Type', 'image/png')
with open(os.path.join(image_path,
'matplotlib.png'), 'rb') as fd:
self.write(fd.read())
class SingleFigurePage(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
self.url_prefix = kwargs.pop('url_prefix', '')
return tornado.web.RequestHandler.__init__(self, application,
request, **kwargs)
def get(self, fignum):
fignum = int(fignum)
manager = Gcf.get_fig_manager(fignum)
ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
prefix=self.url_prefix)
self.render(
"single_figure.html",
prefix=self.url_prefix,
ws_uri=ws_uri,
fig_id=fignum,
toolitems=core.NavigationToolbar2WebAgg.toolitems,
canvas=manager.canvas)
class AllFiguresPage(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
self.url_prefix = kwargs.pop('url_prefix', '')
return tornado.web.RequestHandler.__init__(self, application,
request, **kwargs)
def get(self):
ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
prefix=self.url_prefix)
self.render(
"all_figures.html",
prefix=self.url_prefix,
ws_uri=ws_uri,
figures=sorted(
list(Gcf.figs.items()), key=lambda item: item[0]),
toolitems=core.NavigationToolbar2WebAgg.toolitems)
class MplJs(tornado.web.RequestHandler):
def get(self):
self.set_header('Content-Type', 'application/javascript')
js_content = core.FigureManagerWebAgg.get_javascript()
self.write(js_content)
class Download(tornado.web.RequestHandler):
def get(self, fignum, fmt):
fignum = int(fignum)
manager = Gcf.get_fig_manager(fignum)
# TODO: Move this to a central location
mimetypes = {
'ps': 'application/postscript',
'eps': 'application/postscript',
'pdf': 'application/pdf',
'svg': 'image/svg+xml',
'png': 'image/png',
'jpeg': 'image/jpeg',
'tif': 'image/tiff',
'emf': 'application/emf'
}
self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
buff = six.BytesIO()
manager.canvas.print_figure(buff, format=fmt)
self.write(buff.getvalue())
class WebSocket(tornado.websocket.WebSocketHandler):
supports_binary = True
def open(self, fignum):
self.fignum = int(fignum)
self.manager = Gcf.get_fig_manager(self.fignum)
self.manager.add_web_socket(self)
if hasattr(self, 'set_nodelay'):
self.set_nodelay(True)
def on_close(self):
self.manager.remove_web_socket(self)
def on_message(self, message):
message = json.loads(message)
# The 'supports_binary' message is on a client-by-client
# basis. The others affect the (shared) canvas as a
# whole.
if message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
manager = Gcf.get_fig_manager(self.fignum)
# It is possible for a figure to be closed,
# but a stale figure UI is still sending messages
# from the browser.
if manager is not None:
manager.handle_json(message)
def send_json(self, content):
self.write_message(json.dumps(content))
def send_binary(self, blob):
if self.supports_binary:
self.write_message(blob, binary=True)
else:
data_uri = "data:image/png;base64,{0}".format(
blob.encode('base64').replace('\n', ''))
self.write_message(data_uri)
def __init__(self, url_prefix=''):
if url_prefix:
assert url_prefix[0] == '/' and url_prefix[-1] != '/', \
'url_prefix must start with a "/" and not end with one.'
super(WebAggApplication, self).__init__(
[
# Static files for the CSS and JS
(url_prefix + r'/_static/(.*)',
tornado.web.StaticFileHandler,
{'path': core.FigureManagerWebAgg.get_static_file_path()}),
# An MPL favicon
(url_prefix + r'/favicon.ico', self.FavIcon),
# The page that contains all of the pieces
(url_prefix + r'/([0-9]+)', self.SingleFigurePage,
{'url_prefix': url_prefix}),
# The page that contains all of the figures
(url_prefix + r'/?', self.AllFiguresPage,
{'url_prefix': url_prefix}),
(url_prefix + r'/mpl.js', self.MplJs),
# Sends images and events to the browser, and receives
# events from the browser
(url_prefix + r'/([0-9]+)/ws', self.WebSocket),
# Handles the downloading (i.e., saving) of static images
(url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)',
self.Download),
],
template_path=core.FigureManagerWebAgg.get_static_file_path())
@classmethod
def initialize(cls, url_prefix='', port=None):
if cls.initialized:
return
# Create the class instance
app = cls(url_prefix=url_prefix)
cls.url_prefix = url_prefix
# This port selection algorithm is borrowed, more or less
# verbatim, from IPython.
def random_ports(port, n):
"""
Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n - 5):
yield port + random.randint(-2 * n, 2 * n)
success = None
cls.port = rcParams['webagg.port']
for port in random_ports(cls.port, rcParams['webagg.port_retries']):
try:
app.listen(port)
except socket.error as e:
if e.errno != errno.EADDRINUSE:
raise
else:
cls.port = port
success = True
break
if not success:
raise SystemExit(
"The webagg server could not be started because an available "
"port could not be found")
cls.initialized = True
@classmethod
def start(cls):
if cls.started:
return
# Set the flag to True *before* blocking on IOLoop.instance().start()
cls.started = True
"""
IOLoop.running() was removed as of Tornado 2.4; see for example
https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY
Thus there is no correct way to check if the loop has already been
launched. We may end up with two concurrently running loops in that
unlucky case with all the expected consequences.
"""
print("Press Ctrl+C to stop WebAgg server")
sys.stdout.flush()
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print("Server is stopped")
sys.stdout.flush()
finally:
cls.started = False
def ipython_inline_display(figure):
import tornado.template
WebAggApplication.initialize()
if not webagg_server_thread.is_alive():
webagg_server_thread.start()
with open(os.path.join(
core.FigureManagerWebAgg.get_static_file_path(),
'ipython_inline_figure.html')) as fd:
tpl = fd.read()
fignum = figure.number
t = tornado.template.Template(tpl)
return t.generate(
prefix=WebAggApplication.url_prefix,
fig_id=fignum,
toolitems=core.NavigationToolbar2WebAgg.toolitems,
canvas=figure.canvas,
port=WebAggApplication.port).decode('utf-8')
FigureCanvas = FigureCanvasWebAgg
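# Usage sketch (illustrative, not part of the backend itself): user code
# selects this backend through the standard matplotlib API; the figure being
# plotted is ordinary.
#
#     import matplotlib
#     matplotlib.use('WebAgg')        # must happen before importing pyplot
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.show()                      # starts the tornado server and prints
#                                     # the URL to open in a browser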
| mit |
mmottahedi/neuralnilm_prototype | scripts/e112.py | 2 | 9691 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(25),
'W': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(10),
'W': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_b(name):
# as above but without gradient_steps
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(25),
'W': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(10),
'W': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_c(name):
# Same as above but with learning rate = 0.01
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(25),
'W': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(10),
'W': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_d(name):
# Same as above but with learning rate = 0.001
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.001),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(25),
'W': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'b': Uniform(10),
'W': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('abcd'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
mattilyra/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
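# Out-of-core sketch (commented out, illustrative only): the memory claim in
# the docstring comes from feeding batches through ``partial_fit`` rather than
# ``fit``; splitting the in-memory iris array into 10 chunks below merely
# stands in for data that arrives block by block.
#
#     ipca_stream = IncrementalPCA(n_components=n_components)
#     for batch in np.array_split(X, 10):
#         ipca_stream.partial_fit(batch)
#     X_stream = ipca_stream.transform(X)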
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/gaussian_process/gpc.py | 42 | 31571 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
            Target values (binary or multi-class)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
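# Usage sketch (illustrative; the kernel choice below is an arbitrary
# assumption, not a tuned default):
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
#     gpc.fit(iris.data, iris.target)           # one-vs-rest Laplace GPC
#     proba = gpc.predict_proba(iris.data[:5])  # rows sum to 1 over classes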
| bsd-3-clause |
johnmgregoire/PythonCompositionPlots | quaternary_ternary_faces_demo3.py | 1 | 3340 | import matplotlib.cm as cm
import numpy, sys
import pylab
import operator, copy, os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from quaternary_ternary_faces import ternaryfaces
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from myquaternaryutility import QuaternaryPlot
class plotwidget(FigureCanvas):
def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
#plotdata can be 2d array for image plot or list of 2 1d arrays for x-y plot or 2d array for image plot or list of lists of 2 1D arrays
self.projection3d=projection3d
self.fig=Figure(figsize=(width, height), dpi=dpi)
if projection3d:
self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
else:
self.axes=self.fig.add_subplot(111, navigate=True)
self.axes.hold(True)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
#self.parent=parent
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
#NavigationToolbar(self, parent)
NavigationToolbar(self, self)
self.mpl_connect('button_press_event', self.myclick)
self.clicklist=[]
self.cbax=None
def myclick(self, event):
if not (event.xdata is None or event.ydata is None):
arrayxy=[event.xdata, event.ydata]
print 'clicked on image: array indeces ', arrayxy, ' using button', event.button
self.clicklist+=[arrayxy]
self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button, event.inaxes])
class dialog(QDialog):
def __init__(self, parent=None, title='', folderpath=None):
super(dialog, self).__init__(parent)
plotw=plotwidget(self)
ax=plotw.axes
intervs=20
compsint=[[b, c, (intervs-a-b-c), a] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0,intervs+1-a-b)][::-1]
print len(compsint)
comps=numpy.float32(compsint)/intervs
pylab.figure()
stpquat=QuaternaryPlot(111)
cols=stpquat.rgb_comp(comps)
stpquat.scatter(comps, c=cols, s=100, edgecolors='none')
stpquat.label()
self.tf=ternaryfaces(ax)
self.tf.label()
self.tf.scatter(comps, cols, skipinds=[0, 1, 2, 3], s='patch')
QObject.connect(plotw, SIGNAL("genericclickonplot"), self.plotclick)
mainlayout=QGridLayout()
mainlayout.addWidget(plotw, 0, 0)
self.setLayout(mainlayout)
def plotclick(self, coords_button_ax):
xc, yc, button, ax=coords_button_ax
print self.tf.toComp(xc, yc)
class MainMenu(QMainWindow):
def __init__(self):
super(MainMenu, self).__init__(None)
x=dialog()
x.exec_()
mainapp=QApplication(sys.argv)
form=MainMenu()
form.show()
form.setFocus()
mainapp.exec_()
| bsd-3-clause |
eteq/catterplotpy | catterplot/core.py | 1 | 4056 | # Copyright 2017 Erik Tollerud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals # just in case, for py2 to be py3-ish
import pkgutil, io
import numpy as np
from matplotlib import image, cm
from matplotlib import pyplot as plt
__all__ = ['get_cat_num', 'n_cats', 'catter']
# N_cats x 72 x 72, 0 is transparent, 1 is full-cat
_CAT_DATA = np.load(io.BytesIO(pkgutil.get_data('catterplot', 'data/cats.npy')))
def get_cat_num(i):
return _CAT_DATA[i]
def n_cats():
return len(_CAT_DATA)
def catter(x, y, s=40, c=None, cat='random', alpha=1, ax=None, cmap=None,
aspects='auto'):
"""
A catter plot (scatter plot with cats). Most arguments are interpreted
the same as the matplotlib `scatter` function, except that ``s`` is the
*data* size of the symbol (not pixel). Additional kwargs include:
``cat`` can be:
* int : the index of the cat symbol to use - you can use
``catterplot.n_cats()`` to get the number of cats available
* a squence of ints : must match the data, but otherwise interpreted as for
a scalar
* 'random'/'rand' : random cats
``ax`` can be:
* None: use the current default (pyplot) axes
    * an `Axes` : draw the cats into that axes
``aspects`` can be:
* 'auto': the cats' height-to-width ratio is chosen so that they appear
square given the spread of the inputs
* a float: the height/width of the cats. If not 1, ``s`` is interpreted as
the geometric mean of the sizes
* a sequence of floats: must match the data, gives height/width per point
"""
if ax is None:
ax = plt.gca()
if c is not None:
if cmap is None:
cmap = plt.rcParams['image.cmap']
smap = cm.ScalarMappable(cmap=cmap)
rgba = smap.to_rgba(c)
else:
rgba = np.ones((len(x), 4))
rgba[:, 3] *= alpha
if np.isscalar(s) or s.shape==tuple():
s = np.ones(len(x))*s
# otherwise assume shapes match
if cat in ('rand', 'random'):
cats = np.random.randint(n_cats(), size=len(x))
else:
try:
cats = np.ones(len(x)) * cat
except TypeError as e:
raise TypeError('`cat` argument needs to be "random", a scalar, or match the input.', e)
if aspects == 'auto':
aspects = np.ptp(y)/np.ptp(x)
if np.isscalar(aspects) or aspects.shape==tuple():
aspects = np.ones(len(x)) * aspects
ims = []
for xi, yi, si, ci, cati, aspecti in zip(x, y, s, rgba, cats, aspects):
data = get_cat_num(cati)
offsetx = si * aspecti**-0.5 / (2 * data.shape[0])
offsety = si * aspecti**0.5 / (2 * data.shape[1])
im = image.AxesImage(ax, extent=(xi - offsetx, xi + offsetx,
yi - offsety, yi + offsety))
if c is None:
# defaults to fading "black"
cdata = 1-data
else:
# leave just the alpha to control the fading
cdata = np.ones(data.shape)
imarr = np.transpose([cdata*ci[0], cdata*ci[1], cdata*ci[2],
data*ci[3]], (1, 2, 0))
im.set_data(imarr)
ims.append(im)
for im in ims:
ax.add_image(im)
#ax.autoscale_view()
# for some reason autoscaling fails for images. So we'll just force it via
# scatter...
sc = plt.scatter(x, y)
sc.remove()
return ims
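# A minimal usage sketch for `catter` (illustrative only; the random data and
# the keyword values below are assumptions, not part of the original module):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x, y = np.random.randn(2, 20)
#     catter(x, y, s=0.5, c=y, cat='random', aspects='auto')
#     plt.show()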
| apache-2.0 |
INGEOTEC/b4msa | b4msa/classifier.py | 1 | 6720 | # Copyright 2016 Ranyart R. Suarez (https://github.com/RanyartRodrigo) and Mario Graff (https://github.com/mgraffg)
# with collaborations of Eric S. Tellez
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.svm import LinearSVC
# from b4msa.textmodel import TextModel
import numpy as np
from microtc.utils import read_data_labels, read_data, tweet_iterator
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold
from b4msa.textmodel import TextModel
from multiprocessing import Pool
from scipy.sparse import csr_matrix
class SVC(object):
"""Classifier
:param model: TextModel
:type model: class
Usage:
>>> from b4msa.textmodel import TextModel
>>> from b4msa.classifier import SVC
>>> corpus = ['buenos dias', 'catedras conacyt', 'categorizacion de texto ingeotec']
>>> textmodel = TextModel(corpus)
>>> svc = SVC(textmodel)
>>> _ = svc.fit([textmodel[x] for x in corpus], [1, 0, 0])
>>> svc.predict_text('hola')
0
"""
def __init__(self, model, **kwargs):
self.svc = LinearSVC(**kwargs)
self.model = model
@property
def num_terms(self):
"""Dimension which is the number of terms of the corpus
:rtype: int
"""
try:
return self._num_terms
except AttributeError:
self._num_terms = None
return None
def tonp(self, X):
"""Sparse representation to sparce matrix
:param X: Sparse representation of matrix
:type X: list
:rtype: csr_matrix
"""
data = []
row = []
col = []
for r, x in enumerate(X):
cc = [_[0] for _ in x if np.isfinite(_[1]) and (self.num_terms is None or _[0] < self.num_terms)]
col += cc
data += [_[1] for _ in x if np.isfinite(_[1]) and (self.num_terms is None or _[0] < self.num_terms)]
_ = [r] * len(cc)
row += _
if self.num_terms is None:
_ = csr_matrix((data, (row, col)))
self._num_terms = _.shape[1]
return _
return csr_matrix((data, (row, col)), shape=(len(X), self.num_terms))
def fit(self, X, y):
"""Train the classifier
:param X: inputs - independent variables
:type X: list
:param y: output - dependent variable
:rtype: instance
"""
X = self.tonp(X)
self.le = preprocessing.LabelEncoder()
self.le.fit(y)
y = self.le.transform(y)
if self.num_terms == 0:
return self
self.svc.fit(X, y)
return self
def decision_function(self, Xnew):
Xnew = self.tonp(Xnew)
return self.svc.decision_function(Xnew)
def predict(self, Xnew):
if self.num_terms == 0:
return self.le.inverse_transform(np.zeros(len(Xnew), dtype=np.int))
Xnew = self.tonp(Xnew)
ynew = self.svc.predict(Xnew)
return self.le.inverse_transform(ynew)
def predict_text(self, text):
y = self.predict([self.model[text]])
return y[0]
def fit_file(self, fname, get_tweet='text',
get_klass='klass', maxitems=1e100):
X, y = read_data_labels(fname, get_klass=get_klass,
get_tweet=get_tweet, maxitems=maxitems)
self.fit([self.model[x] for x in X], y)
return self
def predict_file(self, fname, get_tweet='text', maxitems=1e100):
hy = [self.predict_text(x)
for x in read_data(fname, get_tweet=get_tweet,
maxitems=maxitems)]
return hy
@classmethod
def predict_kfold(cls, X, y, n_folds=10, seed=0, textModel_params={},
kfolds=None, pool=None, use_tqdm=True):
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, **kwargs):
return x
le = preprocessing.LabelEncoder().fit(y)
y = np.array(le.transform(y))
hy = np.zeros(len(y), dtype=np.int)
if kfolds is None:
kfolds = StratifiedKFold(n_splits=n_folds, shuffle=True,
random_state=seed).split(X, y)
args = [(X, y, tr, ts, textModel_params) for tr, ts in kfolds]
if pool is not None:
if use_tqdm:
res = [x for x in tqdm(pool.imap_unordered(cls.train_predict_pool, args),
desc='Params', total=len(args))]
else:
res = [x for x in pool.imap_unordered(cls.train_predict_pool, args)]
else:
if use_tqdm:
args = tqdm(args)
res = [cls.train_predict_pool(x) for x in args]
for ts, _hy in res:
hy[ts] = _hy
return le.inverse_transform(hy)
@classmethod
def train_predict_pool(cls, args):
X, y, tr, ts, textModel_params = args
params = TextModel.params()
textModel_params = {k: v for k, v in textModel_params.items() if k in params}
t = TextModel([X[x] for x in tr], **textModel_params)
m = cls(t).fit([t[X[x]] for x in tr], [y[x] for x in tr])
return ts, np.array(m.predict([t[X[x]] for x in ts]))
@classmethod
def predict_kfold_params(cls, fname, n_folds=10, score=None, numprocs=None, seed=0, param_kwargs={}):
from b4msa.params import ParameterSelection, Wrapper
X, y = read_data_labels(fname)
if numprocs is not None:
pool = Pool(numprocs)
else:
pool = None
numprocs = 1
if n_folds % numprocs == 0:
f = Wrapper(X, y, score, n_folds, cls, pool=pool, seed=seed)
pool = None
else:
f = Wrapper(X, y, score, n_folds, cls, seed=seed)
return ParameterSelection().search(f.f, pool=pool, **param_kwargs)
@classmethod
def fit_from_file(cls, fname, textModel_params={}):
D = [x for x in tweet_iterator(fname)]
# X, y = read_data_labels(fname)
y = [x['klass'] for x in D]
model = TextModel(D, **textModel_params)
svc = cls(model)
return svc.fit([model[x] for x in D], y)
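# A minimal usage sketch for SVC.fit_from_file (illustrative; the file name is
# an assumption -- it should point to a JSON-lines file whose records carry a
# 'klass' field plus the text fields expected by TextModel):
#
#     svc = SVC.fit_from_file('tweets.json')
#     print(svc.predict_text('buenos dias'))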
| apache-2.0 |
harisbal/pandas | asv_bench/benchmarks/ctors.py | 3 | 1817 | import numpy as np
import pandas.util.testing as tm
from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex
class SeriesConstructors(object):
param_names = ["data_fmt", "with_index"]
params = [[lambda x: x,
list,
lambda arr: list(arr.astype(str)),
lambda arr: dict(zip(range(len(arr)), arr)),
lambda arr: [(i, -i) for i in arr],
lambda arr: [[i, -i] for i in arr],
lambda arr: ([(i, -i) for i in arr][:-1] + [None]),
lambda arr: ([[i, -i] for i in arr][:-1] + [None])],
[False, True]]
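# Illustrative note (not part of the original file): asv calls setup() and
# time_series_constructor() once for every combination in `params`, i.e. each
# data_fmt callable crossed with with_index in {False, True}.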
def setup(self, data_fmt, with_index):
N = 10**4
arr = np.random.randn(N)
self.data = data_fmt(arr)
self.index = np.arange(N) if with_index else None
def time_series_constructor(self, data_fmt, with_index):
Series(self.data, index=self.index)
class SeriesDtypesConstructors(object):
def setup(self):
N = 10**4
self.arr = np.random.randn(N, N)
self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)
self.s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')] * N * 10)
def time_index_from_array_string(self):
Index(self.arr_str)
def time_index_from_array_floats(self):
Index(self.arr)
def time_dtindex_from_series(self):
DatetimeIndex(self.s)
def time_dtindex_from_index_with_series(self):
Index(self.s)
class MultiIndexConstructor(object):
def setup(self):
N = 10**4
self.iterables = [tm.makeStringIndex(N), range(20)]
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/IPython/core/shellapp.py | 7 | 16199 | # encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core import pylabtools
from IPython.utils import py3compat
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
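# Illustrative note (not part of the original file): each alias maps a short
# command-line option onto a traitlet, e.g. `ipython --cache-size=1000` sets
# InteractiveShell.cache_size and `ipython --matplotlib=qt` sets
# InteractiveShellApp.matplotlib.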
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path"""
if sys.path[0] != '':
sys.path.insert(0, '')
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:
raise
msg = ("Error in loading extension: {ext}\n"
"Check your config files in {location}".format(
ext=ext,
location=self.profile_dir.location
))
self.log.warning(msg, exc_info=True)
except:
if self.reraise_ipython_extension_failures:
raise
self.log.warning("Unknown error in loading extensions:", exc_info=True)
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
# flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname, shell_futures=False):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warning("File not found: %r"%fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
# protect sys.argv from potential unicode strings on Python 2:
if not py3compat.PY3:
sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy'):
self.shell.safe_execfile_ipy(full_filename,
shell_futures=shell_futures)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns,
shell_futures=shell_futures,
raise_exceptions=True)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dir = self.profile_dir.startup_dir
startup_files = []
if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
not (self.file_to_run or self.code_to_run or self.module_to_run):
python_startup = os.environ['PYTHONSTARTUP']
self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
try:
self._exec_file(python_startup)
except:
self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
self.shell.showtraceback()
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
if not self.interact:
self.exit(1)
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
try:
self._exec_file(fname, shell_futures=True)
except:
self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
| mit |
openturns/otwrapy | otwrapy/_otwrapy.py | 2 | 19844 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
General purpose OpenTURNS python wrapper tools
"""
import os
import gzip
import pickle
from tempfile import mkdtemp
import shutil
from functools import wraps
import logging
import openturns as ot
import numpy as np
__author__ = "Felipe Aguirre Martinez"
__copyright__ = "Copyright 2015-2019 Phimeca"
__email__ = "[email protected]"
__all__ = ['load_array', 'dump_array', '_exec_sample_joblib',
'_exec_sample_multiprocessing', '_exec_sample_ipyparallel',
'FunctionDecorator', 'TempWorkDir', 'Parallelizer',
'create_logger', 'Debug', 'safemakedirs']
base_dir = os.path.dirname(__file__)
def load_array(filename, compressed=False):
"""Load a (possibly compressed) pickled array.
Parameters
----------
filename : str
Path to the file to be loaded. If the extension is '.pklz', it considers
that the file is compressed with *gzip*.
compressed : bool
Indicates if the file is compressed with gzip or not.
"""
if compressed or (filename.split('.')[-1] == 'pklz'):
with gzip.open(filename, 'rb') as fh:
return pickle.load(fh)
else:
with open(filename, 'rb') as fh:
return pickle.load(fh)
def dump_array(array, filename, compress=False):
"""Dump an array to a (possibly compressed) file.
Parameters
----------
array : array
Array to be dumped. Typically a np.array or ot.Sample.
filename : str
Path where the file is dumped. If the extension is '.pklz', it considers
that the file has to be compressed with *gzip*.
compress : bool
Indicates if the file has to be compressed with gzip or not.
"""
if compress or (filename.split('.')[-1] == 'pklz'):
with gzip.open(filename, 'wb') as fh:
pickle.dump(array, fh, protocol=2)
else:
with open(filename, 'wb') as fh:
pickle.dump(array, fh, protocol=2)
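# A minimal round-trip sketch for dump_array/load_array (illustrative; the
# file name is an assumption):
#
#     import numpy as np
#     x = np.random.rand(10, 3)
#     dump_array(x, 'sample.pklz', compress=True)  # gzip-compressed pickle
#     y = load_array('sample.pklz')                # compression inferred from the extension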
def safemakedirs(folder):
"""Make a directory without raising an error if it exists.
Parameters
----------
folder : str
Path of the folder to be created.
"""
if not os.path.exists(folder):
os.makedirs(folder)
def create_logger(logfile, loglevel=None):
"""Create a logger with a FileHandler at the given loglevel.
Parameters
----------
logfile : str
Filename for the logger FileHandler to be created.
loglevel : logging level
Threshold for the logger. Logging messages which are less severe than
loglevel will be ignored. It defaults to logging.DEBUG.
"""
if loglevel is None:
loglevel = logging.DEBUG
logger = logging.getLogger('logger_otwrapy')
logger.setLevel(loglevel)
# ----------------------------------------------------------
# Create file handler which logs even DEBUG messages
fh = logging.FileHandler(filename=logfile, mode='w')
fh.setLevel(logging.DEBUG)
# Create a formatter for the file handlers
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(fh)
return logger
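# A minimal usage sketch (illustrative; the log file name is an assumption):
#
#     logger = create_logger('wrapper.log', loglevel=logging.INFO)
#     logger.info('simulation started')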
class Debug(object):
"""Decorator that catches exceptions inside a function and logs them.
A decorator used to protect functions so that exceptions are logged to a
file. It can either be instantiated with a Logger or with a filename for
which a logger will be created with a FileHandler. It comes in especially handy
when you launch your code in a non-interactive environment (e.g., an HPC
cluster through submission scripts), given that Exceptions are captured
and logged to a file.
The great benefit of this implementation is that with a simple decorator
you can protect the methods of your Wrapper class with a try/except
structure. However, this might not be useful for a deeper debugging where
you want to have access to the locals() of the place where the Exception
jumped. If you bump into such a case, add a try/except structure that
catches the Exception on the specific place. It is advised to use the
decorator once you have developed the wrapper and that you are ready to
launch your uncertainty studies.
Parameters
----------
logger : logging.Logger or str
Either a Logger instance or a filename for the logger to be created.
loglevel : logging level
Threshold for the logger. Logging messages which are less severe than
loglevel will be ignored. It defaults to logging.DEBUG.
Examples
--------
To catch exceptions raised inside a function and log them to a file :
>>> import otwrapy as otw
>>> @otw.Debug('func.log')
>>> def func(*args, **kwargs):
>>> pass
"""
def __init__(self, logger, loglevel=None):
if isinstance(logger, logging.Logger):
self.logger = logger
if loglevel is not None:
self.logger.setLevel(loglevel)
elif isinstance(logger, str):
self.logger = create_logger(logger, loglevel=loglevel)
def __call__(self, func):
@wraps(func)
def func_debugged(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.error(e, exc_info=True)
raise e
return func_debugged
class FunctionDecorator(object):
"""Convert an OpenTURNSPythonFunction into a Function.
This class is intended to be used as a decorator.
Parameters
----------
enableCache : bool (Optional)
If True, enable cache of the returned ot.Function
Examples
--------
In order to always get an ot.Function when instantiating your
wrapper, decorate it as follows:
>>> import otwrapy as otw
>>> import openturns as ot
>>> @otw.FunctionDecorator(enableCache=True)
>>> class Wrapper(ot.OpenTURNSPythonFunction):
>>> pass
Note that a great disadvantage of this decorator is that your wrapper cannot
be parallelized afterwards. Only use it if you don't plan to parallelize
your wrapper or if the wrapper itself is parallelized already. However, if
you plan to use :class:`Parallelizer`, there is no need to use this decorator !
Notes
-----
I wanted this decorator to work also with Wrapper class, but it only works
with ParallelWrapper for the moment. The problem is that, apparently,
decorated classes are not picklable, and Wrapper instances must be picklable
so that they can be easily distributed with `multiprocessing`
References
----------
http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/
http://www.artima.com/weblogs/viewpost.jsp?thread=240808
http://stackoverflow.com/questions/30714485/why-does-a-decorated-class-looses-its-docstrings
http://stackoverflow.com/questions/30711730/decorated-class-looses-acces-to-its-attributes
"""
def __init__(self, enableCache=True, doc=None):
self.enableCache = enableCache
self.doc = doc
def __call__(self, wrapper):
@wraps(wrapper)
def numericalmathfunction(*args, **kwargs):
func = ot.Function(wrapper(*args, **kwargs))
# Enable cache
if self.enableCache:
func = ot.MemoizeFunction(func)
func.disableHistory()
# Update __doc__ of the function
if self.doc is None:
# Inherit __doc__ from ParallelWrapper.
func.__doc__ = wrapper.__doc__
else:
func.__doc__ = self.doc
# Add the kwargs as attributes of the function for reference
# purposes.
func.__dict__.update(kwargs)
return func
# Keep the wrapper class as reference
numericalmathfunction.__wrapper__ = wrapper
return numericalmathfunction
class TempWorkDir(object):
"""Implement a context manager that creates a temporary working directory.
Create a temporary working directory on `base_temp_work_dir` preceded by
`prefix` and clean up at the exit if necessary.
See: http://sametmax.com/les-context-managers-et-le-mot-cle-with-en-python/
Parameters
----------
base_temp_work_dir : str (optional)
Root path where the temporary working directory will be created. If None,
it will default to the platform-dependent temporary working directory.
Default = None
prefix : str (optional)
String that precedes the directory name.
Default = 'run-'
cleanup : bool (optional)
If True erase the directory and its children at the exit.
Default = False
transfer : list (optional)
List of files or folders to transfer to the temporary working directory
Examples
--------
In the following example, everything that is executed inside the `with`
environment will happen at a temporary working directory created at
:file:`/tmp` with :file:`run-` as a prefix. The created directory will be
erased upon the exit of the `with` environment and python will go
back to the preceding working directory, even if an Exception is raised.
>>> import otwrapy as otw
>>> import os
>>> print "I'm on my project directory"
>>> print os.getcwd()
>>> with otw.TempWorkDir('/tmp', prefix='run-', cleanup=True):
>>> #
>>> # Do stuff
>>> #
>>> print "..."
>>> print "Now I'm in a temporary directory"
>>> print os.getcwd()
>>> print "..."
>>> print "I'm back to my project directory :"
>>> print os.getcwd()
I'm on my project directory
/home/aguirre/otwrapy
...
Now I'm in a temporary directory
/tmp/run-pZYpzQ
...
I'm back to my project directory :
/home/aguirre/otwrapy
"""
def __init__(self, base_temp_work_dir=None, prefix='run-', cleanup=False,
transfer=None):
if base_temp_work_dir is not None:
safemakedirs(base_temp_work_dir)
self.dirname = mkdtemp(dir=base_temp_work_dir, prefix=prefix)
self.cleanup = cleanup
self.transfer = transfer
def __enter__(self):
self.curdir = os.getcwd()
os.chdir(self.dirname)
if self.transfer is not None:
for file in self.transfer:
if os.path.isfile(file):
shutil.copy(file, self.dirname)
elif os.path.isdir(file):
shutil.copytree(file, os.path.join(self.dirname,
file.split(os.sep)[-1]))
else:
raise Exception('In otwrapy.TempWorkDir : the current ' + \
'path "{}" is not a file '.format(file) + \
'nor a directory to transfer.')
return self.dirname
def __exit__(self, type, value, traceback):
os.chdir(self.curdir)
if self.cleanup:
shutil.rmtree(self.dirname)
def _exec_sample_joblib(func, n_cpus, verbosity):
"""Return a function that executes a sample in parallel using joblib.
Parameters
----------
func : Function or callable
A callable python object, usually a function. The function should take
an input vector as argument and return an output vector.
n_cpus : int
Number of CPUs on which to distribute the function calls.
Returns
-------
_exec_sample : Function or callable
The parallelized function.
"""
try:
from joblib import Parallel, delayed
except ImportError:
from sklearn.externals.joblib import Parallel, delayed
def _exec_sample(X):
Y = Parallel(n_jobs=n_cpus, verbose=verbosity)(
delayed(func)(x) for x in X)
return ot.Sample(Y)
return _exec_sample
def _exec_sample_multiprocessing(func, n_cpus):
"""Return a function that executes a sample in parallel using multiprocessing.
Parameters
----------
func : Function or callable
A callable python object, usually a function. The function should take
an input vector as argument and return an output vector.
n_cpus : int
Number of CPUs on which to distribute the function calls.
Returns
-------
_exec_sample : Function or callable
The parallelized function.
"""
def _exec_sample(X):
from multiprocessing import Pool
p = Pool(processes=n_cpus)
rs = p.map_async(func, X)
p.close()
return ot.Sample(rs.get())
return _exec_sample
def _exec_sample_pathos(func, n_cpus):
"""Return a function that executes a sample in parallel using pathos.
Parameters
----------
func : Function or callable
A callable python object, usually a function. The function should take
an input vector as argument and return an output vector.
n_cpus : int
Number of CPUs on which to distribute the function calls.
Returns
-------
_exec_sample : Function or callable
The parallelized function.
"""
def _exec_sample(X):
from pathos.multiprocessing import ProcessingPool
try:
p = ProcessingPool(n_cpus)
X = np.array(X)
x = np.array_split(X, n_cpus)
# array_split is not supposed to return a list of length n_cpus when len(X)<n_cpus
n_active = min(len(X), n_cpus)
pipe = []
for i in range(n_active):
pipe.append(p.apipe(func, x[i]))
rs = []
for i in range(n_active):
rs.append(pipe[i].get())
rs = [item for sublist in rs for item in sublist]
return ot.Sample(rs)
except ValueError:
# We get here if the chunk size left some single evaluations over
return func(X)
return _exec_sample
def _exec_sample_ipyparallel(func, n, p):
"""Return a function that executes a sample in parallel using ipyparallel.
Parameters
----------
func : Function or callable
A callable python object, usually a function. The function should take
an input vector as argument and return an output vector.
n_cpus : int
Number of CPUs on which to distribute the function calls.
Returns
-------
_exec_sample : Function or callable
The parallelized function.
"""
import ipyparallel as ipp
rc = ipp.Client()
return ot.PythonFunction(func_sample=lambda X:
rc[:].map_sync(func, X),
n=func.getInputDimension(),
p=func.getOutputDimension())
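# Illustrative note (assumption, not from the original file): this backend
# expects an ipyparallel cluster to already be running (e.g. started with
# `ipcluster start -n 4`); otherwise ipp.Client() above raises, and the
# Parallelizer class below falls back to the multiprocessing backend.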
@FunctionDecorator(enableCache=True)
class Parallelizer(ot.OpenTURNSPythonFunction):
"""Parallelize a Wrapper using 'ipyparallel', 'joblib', 'pathos' or 'multiprocessing'.
Parameters
----------
wrapper : ot.Function or instance of ot.OpenTURNSPythonFunction
openturns wrapper to be distributed
backend : string (Optional)
Whether to parallelize using 'ipyparallel', 'joblib', 'pathos' or
'multiprocessing'.
n_cpus : int (Optional)
Number of CPUs on which the simulations will be distributed. Needed only
if using 'joblib', 'pathos' or 'multiprocessing' as backend.
verbosity : int (Optional)
verbose parameter when using 'joblib'. Default is 10.
Examples
--------
For example, in order to parallelize the beam wrapper :class:`examples.beam.Wrapper`
you simply instantiate your wrapper and parallelize it as follows:
>>> from otwrapy.examples.beam import Wrapper
>>> import otwrapy as otw
>>> model = otw.Parallelizer(Wrapper(), n_cpus=-1)
`model` will distribute calls to Wrapper() using multiprocessing and
as many CPUs as you have minus one for the scheduler.
Because Parallelize is decorated with :class:`FunctionDecorator`,
`model` is already an :class:`ot.Function`.
"""
def __init__(self, wrapper, backend='multiprocessing', n_cpus=-1, verbosity=10):
# -1 cpus means all available cpus - 1 for the scheduler
if n_cpus == -1:
import multiprocessing
n_cpus = multiprocessing.cpu_count() - 1
self.n_cpus = n_cpus
self.wrapper = wrapper
self.verbosity = verbosity
# This configures how to run single point simulations on the model:
self._exec = self.wrapper
ot.OpenTURNSPythonFunction.__init__(self,
self.wrapper.getInputDimension(),
self.wrapper.getOutputDimension())
self.setInputDescription(self.wrapper.getInputDescription())
self.setOutputDescription(self.wrapper.getOutputDescription())
assert backend in ['ipython', 'ipyparallel',
'multiprocessing', 'pathos',
'joblib'], "Unknown backend"
# This configures how to run samples on the model :
if self.n_cpus == 1:
self._exec_sample = self.wrapper
elif (backend == 'ipython') or (backend == 'ipyparallel'):
# Check that ipyparallel is installed
try:
import ipyparallel as ipp
# If it is, see if there is a cluster running
try:
rc = ipp.Client()
ipy_backend = True
except (ipp.error.TimeoutError, IOError) as e:
ipy_backend = False
import logging
logging.warning('Unable to connect to an ipython cluster.')
except ImportError:
ipy_backend = False
import logging
logging.warning('ipyparallel package missing.')
if ipy_backend:
self._exec_sample = _exec_sample_ipyparallel(self.wrapper,
self.getInputDimension(),
self.getOutputDimension())
else:
logging.warning('Using multiprocessing backend instead')
self._exec_sample = _exec_sample_multiprocessing(self.wrapper,
self.n_cpus)
elif backend == 'joblib':
# Check that joblib is installed
try:
import joblib
joblib_backend = True
except ImportError:
try:
from sklearn.externals import joblib
joblib_backend = True
except ImportError:
joblib_backend = False
import logging
logging.warning('joblib package missing.')
if joblib_backend:
self._exec_sample = _exec_sample_joblib(self.wrapper,
self.n_cpus,
self.verbosity)
else:
logging.warning('Using multiprocessing backend instead')
self._exec_sample = _exec_sample_multiprocessing(self.wrapper,
self.n_cpus)
elif backend == 'multiprocessing':
self._exec_sample = _exec_sample_multiprocessing(
self.wrapper, self.n_cpus)
elif backend == 'pathos':
self._exec_sample = _exec_sample_pathos(self.wrapper, self.n_cpus)
| lgpl-3.0 |
nddsg/TreeDecomps | xplodnTree/core/td_rndGraphs.py | 1 | 15573 | #!/usr/bin/env python
__version__="0.1.0"
# ToDo:
# [] process mult dimacs.trees to hrg
import sys
import math
import numpy as np
import traceback
import argparse
import os
from glob import glob
import networkx as nx
import pandas as pd
from PHRG import graph_checks
import subprocess
import math
import itertools
import graph_sampler as gs
import platform
from itertools import combinations
from collections import defaultdict
from arbolera import jacc_dist_for_pair_dfrms
import pprint as pp
import isomorph_interxn as isoint
#_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~_~#
def get_parser ():
parser = argparse.ArgumentParser(description='Random graphs (BA graph model). --clqs is used when '\
'.bz2 files are already computed given a path with wild card. Example: `python '\
'itd_rndGraphs.py --clqs`')
parser.add_argument('--clqs', action='store_true', default=False, required=False, \
help="tree_objs_to_hrg_clique_trees")
parser.add_argument('--bam', action='store_true', default=False, required=False, \
help="Barabasi-Albert graph model")
parser.add_argument('--version', action='version', version=__version__)
return parser
def dimacs_nddgo_tree(dimacsfnm_lst, heuristic):
'''
dimacsfnm_lst => list of dimacs file names
heuristic =====> list of variable elimination schemes to use
returns: results - a list of tree files
'''
# print heuristic,dimacsfnm_lst
results = []
for dimacsfname in dimacsfnm_lst:
if isinstance(dimacsfname, list): dimacsfname= dimacsfname[0]
nddgoout = ""
outfname = dimacsfname+"."+heuristic+".tree"
if platform.system() == "Linux":
args = ["bin/linux/serial_wis -f {} -nice -{} -w {}".format(dimacsfname, heuristic, outfname)]
else:
args = ["bin/mac/serial_wis -f {} -nice -{} -w {}".format(dimacsfname, heuristic, outfname)]
while not nddgoout:
popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
popen.wait()
# output = popen.stdout.read()
out, err = popen.communicate()
nddgoout = out.split('\n')
results.append(outfname)
return results
def load_edgelist(gfname):
import pandas as pd
try:
edglst = pd.read_csv(gfname, comment='%', delimiter='\t')
# print edglst.shape
if edglst.shape[1]==1: edglst = pd.read_csv(gfname, comment='%', delimiter="\s+")
except Exception, e:
print "EXCEPTION:",str(e)
traceback.print_exc()
sys.exit(1)
if edglst.shape[1] == 3:
edglst.columns = ['src', 'trg', 'wt']
elif edglst.shape[1] == 4:
edglst.columns = ['src', 'trg', 'wt','ts']
else:
edglst.columns = ['src', 'trg']
g = nx.from_pandas_dataframe(edglst,source='src',target='trg')
g.name = os.path.basename(gfname)
return g
def nx_edges_to_nddgo_graph_sampling(graph, n, m, peo_h):
G = graph
if n is None and m is None: return
# n = G.number_of_nodes()
# m = G.number_of_edges()
nbr_nodes = 256
basefname = 'datasets/{}_{}'.format(G.name, peo_h)
K = int(math.ceil(.25*G.number_of_nodes()/nbr_nodes))
print "--", nbr_nodes, K, '--';
for j,Gprime in enumerate(gs.rwr_sample(G, K, nbr_nodes)):
# if gname is "":
# # nx.write_edgelist(Gprime, '/tmp/sampled_subgraph_200_{}.tsv'.format(j), delimiter="\t", data=False)
# gprime_lst.append(Gprime)
# else:
# # nx.write_edgelist(Gprime, '/tmp/{}{}.tsv'.format(gname, j), delimiter="\t", data=False)
# gprime_lst.append(Gprime)
# # print "... files written: /tmp/{}{}.tsv".format(gname, j)
edges = Gprime.edges()
edges = [(int(e[0]), int(e[1])) for e in edges]
df = pd.DataFrame(edges)
df.sort_values(by=[0], inplace=True)
ofname = basefname+"_{}.dimacs".format(j)
with open(ofname, 'w') as f:
f.write('c {}\n'.format(G.name))
f.write('p edge\t{}\t{}\n'.format(n,m))
# for e in df.iterrows():
output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
df.apply(output_edges, axis=1)
# f.write("e\t{}\t{}\n".format(e[0]+1,e[1]+1))
if os.path.exists(ofname): print 'Wrote: {}'.format(ofname)
return basefname
def edgelist_dimacs_graph(orig_graph, peo_h, prn_tw = False):
fname = orig_graph
gname = os.path.basename(fname).split(".")
gname = sorted(gname,reverse=True, key=len)[0]
if ".tar.bz2" in fname:
from tdec.read_tarbz2 import read_tarbz2_file
edglst = read_tarbz2_file(fname)
df = pd.DataFrame(edglst,dtype=int)
G = nx.from_pandas_dataframe(df,source=0, target=1)
else:
G = nx.read_edgelist(fname, comments="%", data=False, nodetype=int)
# print "...", G.number_of_nodes(), G.number_of_edges()
# from numpy import max
# print "...", max(G.nodes()) ## to handle larger 300K+ nodes with much larger labels
N = max(G.nodes())
M = G.number_of_edges()
# +++ Graph Checks
if G is None: sys.exit(1)
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
graph_checks(G)
# --- graph checks
G.name = gname
# print "...", G.number_of_nodes(), G.number_of_edges()
if G.number_of_nodes() > 500 and not prn_tw:
return (nx_edges_to_nddgo_graph_sampling(G, n=N, m=M, peo_h=peo_h), gname)
else:
return (nx_edges_to_nddgo_graph(G, n=N, m=M, varel=peo_h), gname)
def print_treewidth (in_dimacs, var_elim):
nddgoout = ""
if platform.system() == "Linux":
args = ["bin/linux/serial_wis -f {} -nice -{} -width".format(in_dimacs, var_elim)]
else:
args = ["bin/mac/serial_wis -f {} -nice -{} -width".format(in_dimacs, var_elim)]
while not nddgoout:
popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
popen.wait()
# output = popen.stdout.read()
out, err = popen.communicate()
nddgoout = out.split('\n')
print nddgoout
return nddgoout
def tree_decomposition_with_varelims(fnames, var_elims):
'''
fnames ====> list of dimacs file names
var_elims => list of variable elimination schemes to use
returns:
'''
# print "~~~~ tree_decomposition_with_varelims",'.'*10
# print type(fnames), type(var_elims)
trees_files_d = {}#(list)
for f in fnames:
trees_files_d[f[0]]= [dimacs_nddgo_tree(f,td) for td in var_elims]
# for varel in var_elims:
# tree_files.append([dimacs_nddgo_tree([f], varel) for f in fnames])
return trees_files_d
def convert_nx_gObjs_to_dimacs_gObjs(nx_gObjs):
'''
Take list of graphs and convert to dimacs
'''
dimacs_glst=[]
for G in nx_gObjs:
N = max(G.nodes())
M = G.number_of_edges()
# +++ Graph Checks
if G is None: sys.exit(1)
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
graph_checks(G)
# --- graph checks
G.name = "synthG_{}_{}".format(N,M)
from tdec.arbolera import nx_edges_to_nddgo_graph
dimacs_glst.append(nx_edges_to_nddgo_graph(G, n=N, m=M, save_g=True))
return dimacs_glst
def convert_dimacs_tree_objs_to_hrg_clique_trees(treeObjs):
# print '~~~~ convert_dimacs_tree_objs_to_hrg_clique_trees','~'*10
results = []
from dimacsTree2ProdRules import dimacs_td_ct
for f in treeObjs:
results.append(dimacs_td_ct(f[0], synthg=True))
return results
def get_hrg_prod_rules(prules):
'''
These are production rules
prules is list of bz2 files
'''
mdf = pd.DataFrame()#columns=['rnbr', 'lhs', 'rhs', 'pr'])
for f in prules:
df = pd.read_csv(f, index_col=0, compression='bz2')
df.columns=['rnbr', 'lhs', 'rhs', 'pr']
tname = os.path.basename(f).split(".")
df['cate'] = ".".join(tname[:2])
mdf = pd.concat([mdf,df])
mdf.to_csv(f.split(".")[0]+".prs", sep="\t", index=False)
return mdf
def get_isom_overlap_in_stacked_prod_rules(td_keys_lst, df ):
# for p in [",".join(map(str, comb)) for comb in combinations(td_keys_lst, 2)]:
# p p.split(',')
# print df[df.cate == p[0]].head()
# print df[df.cate == p[1]].head()
# print
# js = jacc_dist_for_pair_dfrms(stckd_df[stckd_df['cate']==p[0]],
# stckd_df[stckd_df['cate']==p[1]])
# print js
# print stckd_df[stckd_df['cate']==p[0]].head()
for comb in combinations(td_keys_lst, 2):
js = jacc_dist_for_pair_dfrms(df[df['cate']==comb[0]],
df[df['cate']==comb[1]])
print "\t", js
def graph_stats_and_visuals(gobjs=None):
"""
graph stats & visuals
:gobjs: input nx graph objects
:return:
"""
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'small',
'figure.figsize': (1.6 * 7, 1.0 * 7),
'axes.labelsize': 'small',
'axes.titlesize': 'small',
'xtick.labelsize': 'small',
'ytick.labelsize': 'small'}
pylab.rcParams.update(params)
import matplotlib.gridspec as gridspec
print "BA G(V,E)"
if gobjs is None:
gobjs = glob("datasets/synthG*.dimacs")
dimacs_g = {}
for fl in gobjs:
with open(fl, 'r') as f:
l=f.readline()
l=f.readline().rstrip('\r\n')
bn = os.path.basename(fl)
dimacs_g[bn] = [int(x) for x in l.split()[-2:]]
print "%d\t%s" %(dimacs_g[bn][0], dimacs_g[bn][1])
print "BA Prod rules size"
for k in dimacs_g.keys():
fname = "ProdRules/"+k.split('.')[0]+".prs"
f_sz = np.loadtxt(fname, delimiter="\t", dtype=str)
print k, len(f_sz)
def main ():
print "Hello"
#~#
#~# Graph stats and visualization
# graph_stats_and_visuals()
# exit()
n_nodes_set = [math.pow(2,x) for x in range(8,9,1)]
ba_gObjs = [nx.barabasi_albert_graph(n, 2) for n in n_nodes_set]
print 'Groups of Random Graphs (BA):'
print ' G(V,E):', [(g.number_of_nodes(), g.number_of_edges()) for g in ba_gObjs]
#~#
#~# convert to dimacs graph
print '~~~~ convert_nx_gObjs_to_dimacs_gObjs'
dimacs_gObjs = convert_nx_gObjs_to_dimacs_gObjs(ba_gObjs,)
#~#
#~# decompose the given graphs
print '~~~~ tree_decomposition_with_varelims'
var_el_m = ['mcs','mind','minf','mmd']
trees_d = tree_decomposition_with_varelims(dimacs_gObjs, var_el_m)
print ' ', trees_d.keys()
#~#
#~# dimacs tree to HRG clique tree
print '~~~~ convert_dimacs_tree_objs_to_hrg_clique_trees'
pr_rules_d={}
for k in trees_d.keys():
pr_rules_d[k] = convert_dimacs_tree_objs_to_hrg_clique_trees(trees_d[k])
#~#
#~# get stacked HRG prod rules
#~# - read sets of prod rules *.bz2
print '~~~~ get_hrg_prod_rules (stacked | prs)'
st_prs_d = {}
for k in pr_rules_d.keys():
st_prs_d[k] = get_hrg_prod_rules(pr_rules_d[k])
print' ', st_prs_d.keys()
for k in st_prs_d.keys():
df = pd.DataFrame(st_prs_d[k])
outfname = "Results/"+os.path.basename(k).split('.')[0]+"stckd_prs.tsv"
df[['rnbr','lhs','rhs','pr']].to_csv(outfname, header=False, index=False, sep="\t")
#~#
#~# get the isomophic overlap
# intxn_prod_rules = get_isom_overlap_in_stacked_prod_rules(stck_prod_rules)
# for nm in sorted(stck_prod_rules.groupby(['cate']).groups.keys()):
# if os.path.exists('ProdRules/'+nm+'.bz2'):
# print ' ProdRules/'+nm+'.bz2'
print '~~~~ get_isom_overlap_in_stacked_prod_rules'
for k in st_prs_d.keys():
df = st_prs_d[k]
gb = df.groupby(['cate']).groups.keys()
get_isom_overlap_in_stacked_prod_rules(gb, df)
#~#
#~# get the isomophic overlap production rules subset
#~# (two diff animals, not the same as the Jaccard Sim above)
print '~~~~ isom intrxn from stacked df'
for k in st_prs_d.keys():
stacked_df = st_prs_d[k]
iso_union, iso_interx = isoint.isomorph_intersection_2dfstacked(stacked_df)
gname = os.path.basename(k).split(".")[0]
iso_interx[[1,2,3,4]].to_csv('Results/{}_isom_interxn.tsv'.format(gname),
sep="\t", header=False, index=False)
#~#
# hrg_graph_gen_from_interxn(iso_interx[[1,2,3,4]])
#def hrg_graph_gen_from_interxn(iso_interxn_df):
def trees_to_hrg_clq_trees():
gname = 'synthG_15_60'
files = glob('ProdRules/{}*.bz2'.format(gname))
print files
print '\tNbr of files:',len(files)
prod_rules_lst = []
stacked_pr_rules = get_hrg_prod_rules(files)
print '\tSize of the df',len(stacked_pr_rules)
df = stacked_pr_rules
gb = df.groupby(['cate']).groups.keys()
print 'Jaccard Similarity'
A = get_isom_overlap_in_stacked_prod_rules(gb, df)
print A
print
iso_union, iso_interx = isoint.isomorph_intersection_2dfstacked(df)
iso_interx[[1,2,3,4]].to_csv('Results/{}_isom_interxn.tsv'.format(gname),
sep="\t", header=False, index=False)
if os.path.exists('Results/{}_isom_interxn.tsv'.format(gname)):
print 'Results/{}_isom_interxn.tsv'.format(gname)+' saved'
def main2 (args_d):
print "Hello"
#~#
#~# Graph stats and visualization
# graph_stats_and_visuals()
# exit()
if (args_d['bam']):
print "~~~~ Groups of Random Graphs (BA):"
n_nodes_set = [math.pow(2,x) for x in range(5,11,1)]
ba_gObjs = [nx.barabasi_albert_graph(n, 3) for n in n_nodes_set]
for g in ba_gObjs:
print "\tG(V,E):", (g.number_of_nodes(), g.number_of_edges())
out_el_fname = 'datasets/bar_alb_{}_exp3.tsv'.format(g.number_of_nodes())
if not os.path.exists(out_el_fname): nx.write_edgelist(g, out_el_fname, delimiter="\t")
print "\t",out_el_fname
#~#
#~# convert to dimacs graph
print '~~~~ convert to_dimacs'
dimacs_gObjs = convert_nx_gObjs_to_dimacs_gObjs(ba_gObjs,)
print "\t",type(dimacs_gObjs)
#~#
#~# decompose the given graphs
print '~~~~ tree_decomposition_with_varelims'
var_el_m = ['mcs','mind','minf','mmd','lexm','mcsm']
trees_d = tree_decomposition_with_varelims(dimacs_gObjs, var_el_m)
for k in trees_d.keys():
print '\t',k, "==>"
for v in trees_d[k]: print "\t ", v
#~#
#~# dimacs tree to HRG clique tree
print '~~~~ tree_objs_to_hrg_clique_trees'
print '~~~~ prules.bz2 saved in ProdRules; individual files'
pr_rules_d={}
for k in trees_d.keys():
pr_rules_d[k] = convert_dimacs_tree_objs_to_hrg_clique_trees(trees_d[k])
print "\tCT:", len(pr_rules_d[k])
#~#
#~# get stacked HRG prod rules
#~# - read sets of prod rules *.bz2
print '~~~~ Stacked HRG get_hrg_prod_rules (stacked | prs)'
st_prs_d = {}
for k in pr_rules_d.keys():
st_prs_d[k] = get_hrg_prod_rules(pr_rules_d[k])
print' ', st_prs_d.keys()
for k in st_prs_d.keys():
df = pd.DataFrame(st_prs_d[k])
outfname = "Results/"+os.path.basename(k).split('.')[0]+"stckd_prs.tsv"
df[['rnbr','lhs','rhs','pr']].to_csv(outfname, header=False, index=False, sep="\t")
#~#
#~# get the isomophic overlap
# intxn_prod_rules = get_isom_overlap_in_stacked_prod_rules(stck_prod_rules)
# for nm in sorted(stck_prod_rules.groupby(['cate']).groups.keys()):
# if os.path.exists('ProdRules/'+nm+'.bz2'):
# print ' ProdRules/'+nm+'.bz2'
print '\n~~~~ get_isom_overlap_in_stacked_prod_rules'
print '~~~~ output is Jaccard Sim Scores'
for k in st_prs_d.keys():
df = st_prs_d[k]
gb = df.groupby(['cate']).groups.keys()
get_isom_overlap_in_stacked_prod_rules(gb, df)
#~#
#~# get the isomophic overlap production rules subset
#~# (two diff animals, not the same as the Jaccard Sim above)
print '~~~~ isom intrxn from stacked df'
for k in st_prs_d.keys():
stacked_df = st_prs_d[k]
iso_union, iso_interx = isoint.isomorph_intersection_2dfstacked(stacked_df)
gname = os.path.basename(k).split(".")[0]
iso_interx[[1,2,3,4]].to_csv('Results/{}_isom_interxn.tsv'.format(gname),
sep="\t", header=False, index=False)
if os.path.exists('Results/{}_isom_interxn.tsv'.format(gname)):
print "\t", 'Written:','Results/{}_isom_interxn.tsv'.format(gname)
#~#
#~# hrg_graph_gen_from_interxn(iso_interx[[1,2,3,4]])
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
try:
main2(args)
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| mit |
mahak/spark | python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py | 7 | 9065 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ComplexOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def numeric_array_psers(self):
return [
pd.Series([[1, 2, 3]]),
pd.Series([[0.1, 0.2, 0.3]]),
pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
]
@property
def non_numeric_array_psers(self):
return {
"string": pd.Series([["x", "y", "z"]]),
"date": pd.Series(
[[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
),
"bool": pd.Series([[True, True, False]]),
}
@property
def numeric_array_pssers(self):
return [ps.from_pandas(pser) for pser in self.numeric_array_psers]
@property
def non_numeric_array_pssers(self):
pssers = {}
for k, v in self.non_numeric_array_psers.items():
pssers[k] = ps.from_pandas(v)
return pssers
@property
def psers(self):
return self.numeric_array_psers + list(self.non_numeric_array_psers.values())
@property
def pssers(self):
return self.numeric_array_pssers + list(self.non_numeric_array_pssers.values())
@property
def pser(self):
return pd.Series([[1, 2, 3]])
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_add(self):
for pser, psser in zip(self.psers, self.pssers):
self.assert_eq(pser + pser, psser + psser)
with option_context("compute.ops_on_diff_frames", True):
# Numeric array + Numeric array
for pser1, psser1 in zip(self.numeric_array_psers, self.numeric_array_pssers):
for pser2, psser2 in zip(self.numeric_array_psers, self.numeric_array_pssers):
self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())
# Non-numeric array + Non-numeric array
self.assertRaises(
TypeError,
lambda: self.non_numeric_array_pssers["string"]
+ self.non_numeric_array_pssers["bool"],
)
self.assertRaises(
TypeError,
lambda: self.non_numeric_array_pssers["string"]
+ self.non_numeric_array_pssers["date"],
)
self.assertRaises(
TypeError,
lambda: self.non_numeric_array_pssers["bool"]
+ self.non_numeric_array_pssers["date"],
)
for data_type in self.non_numeric_array_psers.keys():
self.assert_eq(
self.non_numeric_array_psers.get(data_type)
+ self.non_numeric_array_psers.get(data_type),
self.non_numeric_array_pssers.get(data_type)
+ self.non_numeric_array_pssers.get(data_type),
)
# Numeric array + Non-numeric array
for numeric_ppser in self.numeric_array_pssers:
for non_numeric_ppser in self.non_numeric_array_pssers.values():
self.assertRaises(TypeError, lambda: numeric_ppser + non_numeric_ppser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 - psser2)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 * psser2)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 / psser2)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 // psser2)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 % psser2)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser1 in self.pssers:
for psser2 in self.pssers:
self.assertRaises(TypeError, lambda: psser1 ** psser2)
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
for pser, psser in zip(self.psers, self.pssers):
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
for pser, psser in zip(self.psers, self.pssers):
self.assert_eq(pser.isnull(), psser.isnull())
def test_astype(self):
self.assert_eq(self.pser.astype(str), self.psser.astype(str))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_complex_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
cybernet14/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
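# A minimal usage sketch of PassiveAggressiveClassifier on toy data (the data
# and parameter values are arbitrary, for illustration only):
#
#     import numpy as np
#     from sklearn.linear_model import PassiveAggressiveClassifier
#
#     X = np.array([[-2., -1.], [-1., -1.], [1., 1.], [2., 1.]])
#     y = np.array([0, 0, 1, 1])
#     clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", n_iter=5,
#                                       random_state=0)
#     clf.fit(X, y)
#     clf.predict([[-1.5, -1.]])   # should predict class 0 for this toy set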
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
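# A minimal usage sketch of PassiveAggressiveRegressor (arbitrary toy data,
# for illustration only):
#
#     import numpy as np
#     from sklearn.linear_model import PassiveAggressiveRegressor
#
#     X = np.array([[0.], [1.], [2.], [3.]])
#     y = np.array([0., 1., 2., 3.])
#     reg = PassiveAggressiveRegressor(C=1.0, loss="epsilon_insensitive",
#                                      epsilon=0.1, random_state=0)
#     reg.fit(X, y)
#     reg.predict([[1.5]])   # should be roughly 1.5 for this linear relation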
| bsd-3-clause |
leehongming/wrpc_sw | tools/wr_graph.py | 9 | 5280 | #!/usr/bin/python
#####################################################
## This work is part of the White Rabbit project
##
## The script analyzes log messages from
## the _stat cont_ shell command of WRPC.
##
## Author: Grzegorz Daniluk, Anders Wallin
#####################################################
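# Typical invocation (illustrative; the log file name is made up):
#   ./wr_graph.py wrpc_stat.log
# The parser below only uses lines starting with 'lnk:' that consist of
# whitespace-separated key:value tokens, e.g. a (made-up) sample line:
#   lnk:1 lock:1 mu:625432 cko:12 sec:1423 nsec:120000000 ss:'TRACK_PHASE'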
import sys
import time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
DISP_FIELDS = ['lock', 'mu', 'cko', 'sec']
ANL_FIELDS = ['lock', 'mu', 'cko', 'sec', 'nsec', 'ss']
MIN_TIME_INTERVAL = 0.5 # for logs produced every second, MUST BE CHANGED if
MAX_TIME_INTERVAL = 1.5 # logs are produced more often
MAX_MU_JUMP = 4000
MAX_CKO_JUMP = 50
#####################################################
# parse log file and return 2d array with values we want to analyze (ANL_FIELDS)
def get_wr_data(filename):
f = open(filename)
vals = []
for i in range(0, len(ANL_FIELDS)):
vals.append([])
for line in f:
found = 0
if line.startswith( 'lnk:' ): # process only lines with stat-data
for field in line.split():
if found == len(ANL_FIELDS):
break
val = field.split(':')
if val[0] in ANL_FIELDS:
found +=1
try:
vals[ANL_FIELDS.index(val[0])].append(int(val[1]))
except ValueError:
vals[ANL_FIELDS.index(val[0])].append(val[1])
return vals
#####################################################
def plot_combo_data(vals):
fig = []
ax = []
tstamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
for i in range(0, len(vals)):
fig.append(plt.figure(i))
plt.title('%s, updated %s' % (DISP_FIELDS[i], tstamp) )
ax.append(fig[i].add_subplot(111))
ax[i].set_xlabel('datapoint number')
ax[i].set_ylabel(DISP_FIELDS[i])
ax[i].plot(range(0, len(vals[i])), vals[i], marker='.', linestyle='None', c='b',scaley=True,scalex=False)
ax[i].set_xlim([ 0 , len(vals[i]) ])
#####################################################
# analyze if _lock_ is always 1
def anl_lock(vals):
lock_idx = ANL_FIELDS.index("lock")
# find first sample when it got locked
start=0;
for lock in vals[lock_idx]:
if lock == 1:
break
start += 1
#now from that place we check if it's always locked
passed = 1;
for i in range(start, len(vals[lock_idx])):
if vals[lock_idx][i] != 1:
print "ERROR: loss lock at sample " + str(i)
passed = 0;
return passed
#####################################################
# analyze if _ss_ is always 'TRACK_PHASE'
def anl_state(vals):
state_idx = ANL_FIELDS.index("ss")
# find first sample when it got to TRACK_PHASE
start=0;
for ss in vals[state_idx]:
if ss == """'TRACK_PHASE'""":
break
start += 1
#now from that place we check if it's always TRACK_PHASE
passed = 1;
for i in range(start, len(vals[state_idx])):
if vals[state_idx][i] != """'TRACK_PHASE'""":
print "ERROR: quit TRACK_PHASE at sample " + str(i) + " to " + vals[state_idx][i]
passed = 0;
return passed
#####################################################
def anl_time(vals):
sec_idx = ANL_FIELDS.index("sec")
nsec_idx = ANL_FIELDS.index("nsec")
ss_idx = ANL_FIELDS.index("ss")
# first convert sec,nsec to float
time_log = []
for i in range(0, len(vals[sec_idx])):
time_log.append(float(vals[sec_idx][i]) + float(vals[nsec_idx][i])/1000000000)
# find where it first was synchronized (state == TRACK_PHASE)
start=0;
for ss in vals[ss_idx]:
if ss == """'TRACK_PHASE'""":
break
start += 1
passed = 1
# check if it always increases
for i in range(start+1, len(time_log)):
if time_log[i] < time_log[i-1] or time_log[i] - time_log[i-1] < MIN_TIME_INTERVAL or time_log[i] - time_log[i-1] > MAX_TIME_INTERVAL:
print "ERROR: time counter at sample " + str(i)
passed = 0;
return passed
#####################################################
def anl_rtt(vals):
mu_idx = ANL_FIELDS.index("mu")
passed = 1
for i in range(1, len(vals[mu_idx])):
if vals[mu_idx][i] - vals[mu_idx][i-1] > MAX_MU_JUMP or vals[mu_idx][i] - vals[mu_idx][i-1] < -MAX_MU_JUMP:
print "ERROR: rtt jump at sample " + str(i) + " is " + str(vals[mu_idx][i]-vals[mu_idx][i-1]) + "ps"
passed = 0
return passed
#####################################################
def anl_cko(vals):
ss_idx = ANL_FIELDS.index("ss")
cko_idx = ANL_FIELDS.index("cko")
# find where it was first synchronized (state == TRACK_PHASE)
start=0;
for ss in vals[ss_idx]:
if ss == """'TRACK_PHASE'""":
break
start += 1
passed = 1
for i in range(start, len(vals[cko_idx])):
if vals[cko_idx][i] > MAX_CKO_JUMP or vals[cko_idx][i] < -MAX_CKO_JUMP:
print "ERROR: cko too large at sample " + str(i) + " value is " + str(vals[cko_idx][i]) + "ps"
passed = 0
return passed
#####################################################
if len(sys.argv) == 1:
print "You did not specify the log file"
sys.exit()
vals = get_wr_data(sys.argv[1])
if anl_lock(vals):
print "LOCK:\t Success, always locked"
if anl_state(vals):
print "STATE:\t Success, always TRACK_PHASE"
if anl_time(vals):
print "TIME:\t Success, always growing"
if anl_rtt(vals):
print "RTT:\t Success, no jumps detected"
if anl_cko(vals):
print "CKO:\t Success, no values outside accepted range"
plot_combo_data(vals[0:len(DISP_FIELDS)])
plt.show()
| gpl-2.0 |
iohannez/gnuradio | gr-filter/examples/chirp_channelize.py | 7 | 7144 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._M))
print("Number of taps: ", len(self._taps))
print("Number of channels: ", self._M)
print("Taps per channel: ", tpc)
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = numpy.arange(0, amp, amp / float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in range(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = numpy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(numpy.floor(numpy.sqrt(tb._M)))
Nrows = int(numpy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0 / fs_o
Tmax_o = len(d)*Ts_o
for i in range(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = numpy.array(d)
t_o = numpy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Gabriel-p/photom | IRAF_compare/final_phot.py | 2 | 2392 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.io import ascii
from astropy.table import Table
phot = ascii.read('3_DAOM/daom.obs', fill_values=('INDEF', np.nan))
id_, x, y, v, ev, b, eb, u, eu, i, ei, XV, XB, XU, XI =\
phot['col1'], phot['col2'], phot['col3'], phot['col4'], phot['col5'],\
phot['col6'], phot['col7'], phot['col8'], phot['col9'], phot['col10'],\
phot['col11'], phot['col14'], phot['col15'], phot['col16'], phot['col17']
# print(len(x))
# plt.subplot(121)
# plt.scatter(x, y, s=5)
# msk = ~np.isnan(v)
# x, y = x[msk], y[msk]
# print(len(x))
# plt.subplot(122)
# plt.scatter(x, y, s=5)
# plt.show()
ext_coeffs = {'U': .442, 'B': .232, 'V': .136, 'I': .048}
transf_coeffs = ascii.read('fit_coeffs.dat')
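# The lines below implement the usual two-step photometric calibration
# (sketched here as inferred from the code itself):
#   1) zero-airmass instrumental magnitudes:  m_0 = m_inst - k_f * X_f,
#      with k_f the extinction coefficient and X_f the airmass in filter f;
#   2) standard colors and V through linear transformations, e.g.
#      (B-V) = c1 * (b-v)_0 + c2   and   V = v_0 + c3 * (B-V) + c4,
#      with the c coefficients read from 'fit_coeffs.dat'.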
# Transform to zero airmass instrumental magnitudes.
V_za = v - ext_coeffs['V'] * XV[0]
B_za = b - ext_coeffs['B'] * XB[0]
U_za = u - ext_coeffs['U'] * XU[0]
I_za = i - ext_coeffs['I'] * XI[0]
# Generate instrumental colors.
BV_za, UB_za, VI_za = B_za - V_za, U_za - B_za, V_za - I_za
# Transform to calibrated standard system.
BV = transf_coeffs[1][3] * BV_za + transf_coeffs[1][4]
UB = transf_coeffs[2][3] * UB_za + transf_coeffs[2][4]
VI = transf_coeffs[3][3] * VI_za + transf_coeffs[3][4]
V = V_za + transf_coeffs[0][3] * BV + transf_coeffs[0][4]
# Errors in calibrated system.
eBV = (eb + ev) * transf_coeffs[1][3]
eVI = (ev + ei) * transf_coeffs[3][3]
eUB = (eu + eb) * transf_coeffs[2][3]
eV = ev
plt.style.use('seaborn-darkgrid')
fig = plt.figure(figsize=(30, 10))
gs = gridspec.GridSpec(1, 3)
ax = fig.add_subplot(gs[0])
plt.xlim(-1., 4.5)
plt.ylim(22.5, 10.)
plt.title("BV vs V")
plt.scatter(BV, V, s=3)
ax = fig.add_subplot(gs[1])
plt.xlim(-1., 4.5)
plt.ylim(22.5, 10.)
plt.title("VI vs V")
plt.scatter(VI, V, s=3)
ax = fig.add_subplot(gs[2])
plt.xlim(0., 3.)
plt.ylim(1.7, -1.)
plt.title("BV vs UB")
plt.scatter(BV, UB, s=3)
fig.tight_layout()
plt.savefig('output/final_phot.png', dpi=300, bbox_inches='tight')
tab = Table([id_, x, y, V, eV, BV, eBV, VI, eVI, UB, eUB],
names=('ID', 'x', 'y', 'V', 'eV', 'BV', 'eBV', 'VI', 'eVI',
'UB', 'eUB'))
ascii.write(
tab, 'input/phot_compare/final_phot.dat', format='fixed_width',
delimiter=' ', formats={_: '%10.4f' for _ in tab.keys()[1:]},
fill_values=[(ascii.masked, 'nan')], overwrite=True)
| gpl-3.0 |
subodhchhabra/pandashells | pandashells/bin/p_rand.py | 3 | 5729 | #! /usr/bin/env python
# standard library imports
import argparse
import textwrap
import sys # NOQA importing sys so I can mock sys.argv in tests
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import pandas as pd
import numpy as np
# want different default mu values for normal and poisson distributions
def fill_default_mu(args):
if args.type[0] == 'normal':
args.mu = [0.] if args.mu is None else args.mu
elif args.type[0] == 'poisson':
args.mu = [1.] if args.mu is None else args.mu
return args
def get_samples(args):
"""
Return samples from selected distribution
"""
# dictionary to hold numpy arguments for different distributions
distribution_for = {
'uniform': {
'function': np.random.uniform,
'kwargs': {
'low': args.min[0],
'high': args.max[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'normal': {
'function': np.random.normal,
'kwargs': {
'loc': args.mu[0] if args.mu else None,
'scale': args.sigma[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'poisson': {
'function': np.random.poisson,
'kwargs': {
'lam': args.mu[0] if args.mu else None,
'size': (args.num_samples[0], args.columns[0]),
},
},
'beta': {
'function': np.random.beta,
'kwargs': {
'a': args.alpha[0],
'b': args.beta[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'gamma': {
'function': np.random.gamma,
'kwargs': {
'shape': args.alpha[0],
'scale': 1. / args.beta[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
'binomial': {
'function': np.random.binomial,
'kwargs': {
'n': args.N[0],
'p': args.p[0],
'size': (args.num_samples[0], args.columns[0]),
},
},
}
# grab the function for generating proper distribution
dist = distribution_for[args.type[0]]
# call the random generating function with the proper kwargs
values = dist['function'](**dist['kwargs'])
# set column names of output dataframe
columns = ['c{}'.format(c) for c in range(args.columns[0])]
# framify and return results
return pd.DataFrame(values, columns=columns)
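# A sketch of calling get_samples() directly, bypassing the CLI (the Namespace
# below simply mirrors the argparse options and defaults defined in main()):
#
#     from argparse import Namespace
#     args = Namespace(type=['normal'], num_samples=[5], columns=[2],
#                      mu=[0.], sigma=[1.], min=[0.], max=[1.],
#                      alpha=[2.], beta=[2.], N=[10], p=[.5])
#     df = get_samples(args)   # 5 x 2 DataFrame of draws from N(0, 1)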
def main():
msg = textwrap.dedent(
"""
        Return random samples from common probability distributions.
-----------------------------------------------------------------------
Examples:
uniform: p.rand -n 1000 -t uniform --min=0 --max=1 | p.hist
normal: p.rand -n 1000 -t normal --mu=0 --sigma=1 | p.hist
poisson: p.rand -n 1000 -t poisson --mu=1 | p.hist
beta: p.rand -n 1000 -t beta --alpha=2 --beta=6 | p.hist
gamma: p.rand -n 1000 -t gamma --alpha=1 --beta=1 | p.hist
binomial: p.rand -n 1000 -t binomial --N=10 --p=0.4 | p.hist
-----------------------------------------------------------------------
"""
)
# read command line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
parser.add_argument(
'-t', '--type', nargs=1, type=str, default=['uniform'],
choices=['uniform', 'normal', 'beta', 'gamma', 'binomial', 'poisson'],
help='type of distribution (default=\'uniform\')')
parser.add_argument(
'-n', '--num_samples', nargs=1, default=[10], type=int,
help='The number of rows to generate (default=10)')
parser.add_argument(
'-c', '--columns', nargs=1, default=[1], type=int,
help='The number of columns to generate per row (default=1)')
parser.add_argument(
'--N', nargs=1, default=[10], type=int,
help=(
'(Binomial Dist) Largest possible value for random variable. '
'(default=10)'
)
)
parser.add_argument(
'--p', nargs=1, default=[.5], type=float,
help=(
            '(Binomial Dist) Bernoulli probability for each trial '
'(default=.5)'
)
)
parser.add_argument(
'--mu', nargs=1, type=float,
        help='(Normal, Poisson) Mean (defaults: normal:0, poisson:1)')
parser.add_argument(
'--sigma', nargs=1, default=[1.], type=float,
help='(Normal) standard deviation, (default: 1)')
parser.add_argument(
'--min', nargs=1, default=[0.], type=float,
help='(Uniform) Minimum value of range, (default: 0)')
parser.add_argument(
'--max', nargs=1, default=[1.], type=float,
help='(Uniform) Maximum value of range, (default: 1)')
parser.add_argument(
'--alpha', nargs=1, default=[2.], type=float,
help='(Beta, Gamma) (default: 2)')
parser.add_argument(
'--beta', nargs=1, default=[2.], type=float,
help='(Beta, Gamma) (default: 2)')
arg_lib.add_args(parser, 'io_out')
# parse arguments
args = parser.parse_args()
# set some defaults
args = fill_default_mu(args)
# get the samples
df = get_samples(args)
# write dataframe to output
io_lib.df_to_output(args, df)
if __name__ == '__main__': # pragma: no cover
main()
| bsd-2-clause |
vermouthmjl/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 94 | 2264 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
is to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
kagayakidan/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/methods/test_get_numeric_data.py | 2 | 3173 | import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, Timestamp
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestGetNumericData:
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
obj = DataFrame({"A": [1, "2", 3.0]})
result = obj._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
tm.assert_frame_equal(result, expected)
def test_get_numeric_data(self):
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
df = DataFrame(
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[
np.dtype("float64"),
np.dtype("int64"),
np.dtype(objectname),
np.dtype(datetime64name),
],
index=["a", "b", "c", "f"],
)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"d": np.array([1.0] * 10, dtype="float32"),
"e": np.array([1] * 10, dtype="int32"),
"f": np.array([1] * 10, dtype="int16"),
"g": Timestamp("20010102"),
},
index=np.arange(10),
)
result = df._get_numeric_data()
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
tm.assert_frame_equal(result, expected)
only_obj = df.loc[:, ["c", "g"]]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
tm.assert_frame_equal(result, expected)
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
tm.assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
tm.assert_frame_equal(result, expected)
def test_get_numeric_data_mixed_dtype(self):
# numeric and object columns
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["foo", "bar", "baz"],
"d": [None, None, None],
"e": [3.14, 0.577, 2.773],
}
)
result = df._get_numeric_data()
tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
def test_get_numeric_data_extension_dtype(self):
# GH#22290
df = DataFrame(
{
"A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
"B": Categorical(list("abcabc")),
"C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
"D": IntervalArray.from_breaks(range(7)),
}
)
result = df._get_numeric_data()
expected = df.loc[:, ["A", "C"]]
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
mhue/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
marqh/cartopy | lib/cartopy/examples/global_map.py | 1 | 2094 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
"""
This example demonstrates the way a box is warped when it is defined in a rotated space.
Notice that the box contains the north pole, adding extra complexity to the underlying transformation.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def main():
pc = ccrs.PlateCarree()
rob = ccrs.Robinson()
sph = ccrs.Geodetic()
ax = plt.axes(projection=rob)
# ax = plt.axes(projection=pc)
# ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_global()
from cartopy.examples.waves import sample_data
x, y, data = sample_data((20, 40))
# ax.contourf(x, y, data, transform=pc, alpha=0.3)
# ax.contour(x, y, data, transform=pc, alpha=0.3)
# ax.contourf(x, y, data, 3, transform=pc, alpha=0.3)
# ax.contourf(x, y, data, 5, transform=pc, alpha=0.3)
#print 'getting domain'
#print ax.native_extents()
#print ax.map_domain(ccrs.PlateCarree())
#print ax.ll_boundary_poly()
ax.stock_img()
ax.coastlines()
#ax.gshhs_line()
# ax.coastlines_land()
plt.plot(-0.08, 51.53, 'o', transform=pc)
plt.plot([-0.08, 132], [51.53, 43.17], transform=pc)
plt.plot([-0.08, 132], [51.53, 43.17], transform=sph)
# ax.gshhs_line(resolution='coarse', domain=ax.boundary_poly())
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive/09_sequence/labs/txtclsmodel/trainer/model.py | 3 | 11578 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
import re
import pickle
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.keras.preprocessing import text
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import Conv1D
from tensorflow.python.keras.layers import MaxPooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from google.cloud import storage
tf.logging.set_verbosity(tf.logging.INFO)
CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2} # label-to-int mapping
TOP_K = 20000 # Limit on the number vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50 # Sentences will be truncated/padded to this length
"""
Helper function to download data from Google Cloud Storage
# Arguments:
source: string, the GCS URL to download from (e.g. 'gs://bucket/file.csv')
destination: string, the filename to save as on local disk. MUST be filename
ONLY, doesn't support folders. (e.g. 'file.csv', NOT 'folder/file.csv')
# Returns: nothing, downloads file to local disk
"""
def download_from_gcs(source, destination):
search = re.search('gs://(.*?)/(.*)', source)
bucket_name = search.group(1)
blob_name = search.group(2)
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
bucket.blob(blob_name).download_to_filename(destination)
"""
Parses raw tsv containing hacker news headlines and returns (sentence, integer label) pairs
# Arguments:
train_data_path: string, path to tsv containing training data.
can be a local path or a GCS url (gs://...)
eval_data_path: string, path to tsv containing eval data.
can be a local path or a GCS url (gs://...)
# Returns:
((train_sentences, train_labels), (test_sentences, test_labels)): sentences
are lists of strings, labels are numpy integer arrays
"""
def load_hacker_news_data(train_data_path, eval_data_path):
if train_data_path.startswith('gs://'):
download_from_gcs(train_data_path, destination='train.csv')
train_data_path = 'train.csv'
if eval_data_path.startswith('gs://'):
download_from_gcs(eval_data_path, destination='eval.csv')
eval_data_path = 'eval.csv'
# Parse CSV using pandas
column_names = ('label', 'text')
df_train = pd.read_csv(train_data_path, names=column_names, sep='\t')
df_eval = pd.read_csv(eval_data_path, names=column_names, sep='\t')
return ((list(df_train['text']), np.array(df_train['label'].map(CLASSES))),
(list(df_eval['text']), np.array(df_eval['label'].map(CLASSES))))
"""
Create tf.estimator compatible input function
# Arguments:
texts: [strings], list of sentences
labels: numpy int vector, integer labels for sentences
tokenizer: tf.python.keras.preprocessing.text.Tokenizer
used to convert sentences to integers
batch_size: int, number of records to use for each train batch
mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.EVAL
# Returns:
tf.estimator.inputs.numpy_input_fn, produces feature and label
tensors one batch at a time
"""
def input_fn(texts, labels, tokenizer, batch_size, mode):
# Transform text to sequence of integers
    x = tokenizer.texts_to_sequences(texts)
# Fix sequence length to max value. Sequences shorter than the length are
# padded in the beginning and sequences longer are truncated
# at the beginning.
    x = sequence.pad_sequences(x, maxlen=MAX_SEQUENCE_LENGTH)
# default settings for training
num_epochs = None
shuffle = True
# override if this is eval
if mode == tf.estimator.ModeKeys.EVAL:
num_epochs = 1
shuffle = False
return tf.estimator.inputs.numpy_input_fn(
x,
y=labels,
batch_size=batch_size,
num_epochs=num_epochs,
shuffle=shuffle,
queue_capacity=50000
)
"""
Builds a CNN model using keras and converts to tf.estimator.Estimator
# Arguments
model_dir: string, file path where training files will be written
config: tf.estimator.RunConfig, specifies properties of tf Estimator
filters: int, output dimension of the layers.
kernel_size: int, length of the convolution window.
embedding_dim: int, dimension of the embedding vectors.
dropout_rate: float, percentage of input to drop at Dropout layers.
pool_size: int, factor by which to downscale input at MaxPooling layer.
embedding_path: string , file location of pre-trained embedding (if used)
defaults to None which will cause the model to train embedding from scratch
word_index: dictionary, mapping of vocabulary to integers. used only if
pre-trained embedding is provided
# Returns
A tf.estimator.Estimator
"""
def keras_estimator(model_dir,
config,
learning_rate,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3,
embedding_path=None,
word_index=None):
# Create model instance.
model = models.Sequential()
num_features = min(len(word_index) + 1, TOP_K)
# Add embedding layer. If pre-trained embedding is used add weights to the
# embeddings layer and set trainable to input is_embedding_trainable flag.
if embedding_path != None:
embedding_matrix = get_embedding_matrix(word_index, embedding_path, embedding_dim)
is_embedding_trainable = True # set to False to freeze embedding weights
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH,
weights=[embedding_matrix],
trainable=is_embedding_trainable))
else:
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH))
model.add(Dropout(rate=dropout_rate))
model.add(Conv1D(filters=filters,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
padding='same'))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Conv1D(filters=filters * 2,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
padding='same'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(rate=dropout_rate))
model.add(Dense(len(CLASSES), activation='softmax'))
# Compile model with learning parameters.
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
    # Wrap the compiled Keras model as a tf.estimator.Estimator
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
return estimator
"""
Defines the features to be passed to the model during inference
Expects already tokenized and padded representation of sentences
# Arguments: none
# Returns: tf.estimator.export.ServingInputReceiver
"""
def serving_input_fn():
feature_placeholder = tf.placeholder(tf.int16, [None, MAX_SEQUENCE_LENGTH])
features = feature_placeholder # pass as-is
return tf.estimator.export.TensorServingInputReceiver(features, feature_placeholder)
"""
Takes embedding for generic voabulary and extracts the embeddings
matching the current vocabulary
The pre-trained embedding file is obtained from https://nlp.stanford.edu/projects/glove/
# Arguments:
word_index: dict, {key =word in vocabulary: value= integer mapped to that word}
embedding_path: string, location of the pre-trained embedding file on disk
embedding_dim: int, dimension of the embedding space
# Returns: numpy matrix of shape (vocabulary, embedding_dim) that contains the embedded
representation of each word in the vocabulary.
"""
def get_embedding_matrix(word_index, embedding_path, embedding_dim):
# Read the pre-trained embedding file and get word to word vector mappings.
embedding_matrix_all = {}
# Download if embedding file is in GCS
if embedding_path.startswith('gs://'):
download_from_gcs(embedding_path, destination='embedding.csv')
embedding_path = 'embedding.csv'
with open(embedding_path) as f:
for line in f: # Every line contains word followed by the vector value
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embedding_matrix_all[word] = coefs
# Prepare embedding matrix with just the words in our word_index dictionary
num_words = min(len(word_index) + 1, TOP_K)
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
if i >= TOP_K:
continue
embedding_vector = embedding_matrix_all.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
"""
Main orchestrator. Responsible for calling all other functions in model.py
# Arguments:
output_dir: string, file path where training files will be written
hparams: dict, command line parameters passed from task.py
# Returns: nothing, kicks off training and evaluation
"""
def train_and_evaluate(output_dir, hparams):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
# Load Data
((train_texts, train_labels), (test_texts, test_labels)) = load_hacker_news_data(
hparams['train_data_path'], hparams['eval_data_path'])
# Create vocabulary from training corpus.
tokenizer = text.Tokenizer(num_words=TOP_K)
tokenizer.fit_on_texts(train_texts)
# Save token dictionary to use during prediction time
pickle.dump(tokenizer, open('tokenizer.pickled', 'wb'))
# Create estimator
run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
    # 'learning_rate' and 'embedding_path' are assumed to be supplied through
    # hparams by task.py; 'embedding_path' may be None to train the embedding
    # from scratch.
    estimator = keras_estimator(
        model_dir=output_dir,
        config=run_config,
        learning_rate=hparams['learning_rate'],
        embedding_path=hparams.get('embedding_path'),
        word_index=tokenizer.word_index)
# Create TrainSpec
train_steps = hparams['num_epochs'] * len(train_texts) / hparams['batch_size']
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn(
train_texts,
train_labels,
tokenizer,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.TRAIN),
max_steps=train_steps
)
# Create EvalSpec
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn(
test_texts,
test_labels,
tokenizer,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.EVAL),
steps=None,
exporters=exporter,
start_delay_secs=10,
throttle_secs=10
)
# Start training
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
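# A sketch of how train_and_evaluate() above is expected to be driven (the GCS
# paths are made up, and 'learning_rate'/'embedding_path' are assumed to be
# among the keys task.py passes in via hparams):
#
#     train_and_evaluate('gs://my-bucket/txtcls/trained_model', {
#         'train_data_path': 'gs://my-bucket/txtcls/train.tsv',
#         'eval_data_path': 'gs://my-bucket/txtcls/eval.tsv',
#         'num_epochs': 5,
#         'batch_size': 128,
#         'learning_rate': 0.001,
#         'embedding_path': None,   # or a GCS path to a GloVe-style .txt file
#     })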
| apache-2.0 |
pradyu1993/scikit-learn | examples/cluster/plot_cluster_iris.py | 2 | 2602 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver,
and finally the ground truth is shown.
"""
print __doc__
# Code source: Gael Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random'),
}
fignum = 1
for name, est in estimators.iteritems():
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'),
)
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
| bsd-3-clause |
treycausey/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 31 | 2633 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm """
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
"""Test AffinityPropagation.predict"""
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
"""Test exception in AffinityPropagation.predict"""
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/backends/backend_qt5agg.py | 11 | 7342 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from .backend_qt5 import QtCore
from .backend_qt5 import QtGui
from .backend_qt5 import FigureManagerQT
from .backend_qt5 import NavigationToolbar2QT
##### Modified Qt5 backend import
from .backend_qt5 import FigureCanvasQT
##### not used
from .backend_qt5 import show
from .backend_qt5 import draw_if_interactive
from .backend_qt5 import backend_version
######
from matplotlib.cbook import mplDeprecation
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt5agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(object):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def drawRectangle(self, rect):
self._drawRect = rect
self.repaint()
def paintEvent(self, e):
"""
Copy the image from the Agg canvas to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
# FigureCanvasQT.paintEvent(self, e)
if DEBUG:
print('FigureCanvasQtAgg.paintEvent: ', self,
self.get_width_height())
if self.blitbox is None:
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
refcnt = sys.getrefcount(stringBuffer)
# convert the Agg rendered image -> qImage
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
# get the rectangle for the image
rect = qImage.rect()
p = QtGui.QPainter(self)
# reset the image area of the canvas to be the back-ground color
p.eraseRect(rect)
# draw the rendered image on to the canvas
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self._drawRect is not None:
p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine))
x, y, w, h = self._drawRect
p.drawRect(x, y, w, h)
p.end()
# This works around a bug in PySide 1.1.2 on Python 3.x,
# where the reference count of stringBuffer is incremented
# but never decremented by QImage.
# TODO: revert PR #1323 once the issue is fixed in PySide.
del qImage
if refcnt != sys.getrefcount(stringBuffer):
_decref(stringBuffer)
else:
bbox = self.blitbox
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h,
QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.blitbox = None
self._drawRect = None
def draw(self):
"""
Draw the figure with Agg, and queue a request
for a Qt draw.
"""
# The Agg draw is done here; delaying it until the paintEvent
# causes problems with code that uses the result of the
# draw() to update plot elements.
FigureCanvasAgg.draw(self)
self._priv_update()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.blitbox = bbox
l, b, w, h = bbox.bounds
t = b + h
self.repaint(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc.
Modified to import from Qt5 backend for new-style mouse events.
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
# it has been reported that Qt is semi-broken in a windows
# environment. If `self.draw()` uses `update` to trigger a
# system-level window repaint (as is explicitly advised in the
# Qt documentation) the figure responds very slowly to mouse
# input. The workaround is to directly use `repaint`
# (against the advice of the Qt documentation). The
# difference between `update` and `repaint` is that `update`
# schedules a `repaint` for the next time the system is idle,
# whereas `repaint` repaints the window immediately. The
# risk is that if `self.draw` gets called within another `repaint`
# method there will be an infinite recursion. Thus, we only
# expose windows users to this risk.
if sys.platform.startswith('win'):
self._priv_update = self.repaint
else:
self._priv_update = self.update
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def __init__(*args, **kwargs):
warnings.warn('This class has been deprecated in 1.4 ' +
'as it has no additional functionality over ' +
'`NavigationToolbar2QT`. Please change your code to '
'use `NavigationToolbar2QT` instead',
mplDeprecation)
NavigationToolbar2QT.__init__(*args, **kwargs)
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| mit |
yl565/statsmodels | statsmodels/iolib/summary.py | 22 | 33071 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
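# Illustrative behavior of forg (not part of the original module): values
# outside roughly [1e-4, 1e4) fall back to the general/scientific format.
#
#     >>> forg(0.123456)          # '    0.123'
#     >>> forg(123456.0)          # ' 1.23e+05'
#     >>> forg(0.123456, prec=4)  # '    0.1235'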
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
----------
yname : string
optional, Default is `Y`
xname : list of strings
optional, Default is `X.#` for # in p the number of regressors
Confidence interval : (0,1) not implemented
title : string
optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
Prints the summarized results
Option :
returns='text'
Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
Not implemented yet
Option :
returns='HTML'
Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).fit()
>>> print(ols_results.summary())
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] # check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
# SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
title = results.model.__class__.__name__ + ' Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood:', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_header : bool
If False (default), then the header row is added. If True, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
[forg(conf_int[i,0]) for i in exog_idx],
[forg(conf_int[i,1]) for i in exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
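# Illustrative usage of summary_params (assumes a fitted results instance,
# e.g. res = sm.OLS(y, X).fit(); the variable names here are hypothetical):
#
#     tbl = summary_params(res, xname=['const', 'x1'], alpha=0.05, use_t=True)
#     print(tbl)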
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
title : None or string
Returns
-------
tables : list of SimpleTable
this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
If True, then all headers are kept. If False, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances; horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : string
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
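# Illustrative composition of a Summary instance (the `res` below is a
# hypothetical fitted results instance; this mirrors how results classes
# typically build their .summary()):
#
#     smry = Summary()
#     smry.add_table_2cols(res, gleft=[('Dep. Variable:', None),
#                                      ('Model:', None),
#                                      ('Date:', None)], gright=[])
#     smry.add_table_params(res, alpha=.05, use_t=True)
#     print(smry.as_text())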
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
| bsd-3-clause |
gabyx/GRSFramework | simulations/python/modules/GRSFPlotSettings/loadSettings.py | 1 | 1329 | import sys,os,inspect
from .generalSettings import *
# use the matplotlibrc settings in this folder
def loadPlotSettings(mplModule):
def getScriptPath():
return os.path.dirname(inspect.getfile(inspect.currentframe()))
matplotlibrcFile = os.path.join(getScriptPath(),"matplotlibrc")
# update matplotlib settings =======================================
if matplotlibrcFile is not None:
print("Setting matplotlib settings from file: %s" % matplotlibrcFile)
mplModule.rcParams = mplModule.rc_params_from_file(matplotlibrcFile, fail_on_error=True)
defaults = {
'lines.linewidth': defaultLineSettings["thin"] ,
'axes.linewidth': defaultLineSettings["thick"],
"lines.marker" : None, # the default marker
"lines.markeredgewidth" : cm2inch(defaultLineSettings["extra-thin"]), # the line width around the marker symbol
"lines.markersize" : cm2pt(0.12), # markersize, in points
"figure.figsize": (cm2inch(16),cm2inch(12))
}
mplModule.rcParams.update(defaults)
path = os.path.join(getScriptPath(),"MathMacros.sty")
mplModule.rcParams["text.latex.preamble"] = [r"\def\dontLoadMathEnv{}",r"\input{%s}" % path]
# ==================================================================
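# Illustrative usage (a minimal sketch; assumes matplotlib is installed and
# that the matplotlibrc / MathMacros.sty files next to this module exist):
#
#     import matplotlib
#     loadPlotSettings(matplotlib)
#     import matplotlib.pyplot as plt   # import pyplot only after the rc update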
| gpl-3.0 |
dricciardelli/vae2vec | capt_gen_e2e_adv.py | 1 | 30123 | # -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
import discriminators
test_image_path='./data/acoustic-guitar-player.jpg'
vgg_path='./data/vgg16-20160129.tfmodel'
n=50000-2
def map_lambda():
return n+1
def rev_map_lambda():
return "<UNK>"
def load_text(n,capts,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
# _map,rev_map=get_one_hot_map(word_list,def_list,n)
_map=pkl.load(open('maps.pkl','rb'))
rev_map=pkl.load(open('rev_maps.pkl','rb'))
if num_samples is not None:
num_samples=len(capts)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
if capts is not None:
# y,mask = map_one_hot(capts[:num_samples],_map,maxlen,n)
# np.save('ycoh')
y=np.load('ycoh.npy','r')
else:
# np.save('X',X)
# np.save('yc',y)
# np.save('maskc',mask)
y=np.load('yaoh.npy','r')
X=np.load('Xaoh.npy','r')
mask=np.load('maskaoh.npy','r')
print (np.max(y))
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(lambda :n+1)
rev_map=defaultdict(lambda:"<UNK>")
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
for num_bits in range(binary_dim):
for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
bitmap=np.zeros(binary_dim)
bitmap[np.array(bit_config)]=1
num=bitmap*(2** np.arange(binary_dim ))
num=np.sum(num).astype(np.uint32)
word=words[i]
_map[word]=num
rev_map[num]=word
i+=1
if i>=len(words):
break
if i>=len(words):
break
# for word in words:
# i+=1
# _map[word]=i
# rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
for word in corpus:
mapped=_map[word]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
if not onehot:
rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32)
for l,line in enumerate(corpus):
# if len(line)==0:
# rtn[l]=n+2
# else:
# if line not in _map:
# total_not+=1
mapped=_map[line]
if mapped==75001:
total_not+=1
if onehot:
binrep=np.zeros(2**binary_dim)
print(line)
binrep[mapped]=1
else:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l]=binrep
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2,binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
if form2:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l,i+1,:]=binrep
else:
rtn[l,i+1]=mapped
if mapped==75001:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
if form2:
rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
else:
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
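# Illustrative usage of the initializer above (not part of the original
# script): create a 256 -> 512 weight matrix with Xavier/Glorot scaling.
#
#     W = tf.Variable(xavier_init(256, 512), name='example_weight')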
class Caption_Generator():
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None,discriminator=None):
self.dim_in = dim_in
self.dim_embed = dim_embed
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.n_words = n_words
self.n_input = n_input
self.n_lstm_input=n_lstm_input
self.n_z=n_z
self.discriminator=discriminator
if from_image:
with open(vgg_path,'rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
self.images = tf.placeholder("float32", [1, 224, 224, 3])
tf.import_graph_def(graph_def, input_map={"images":self.images})
graph = tf.get_default_graph()
self.sess = tf.InteractiveSession(graph=graph)
self.from_image=from_image
# declare the variables to be used for our word embeddings
self.word_embedding = tf.Variable(tf.random_uniform([self.n_z, self.dim_embed], -0.1, 0.1), name='word_embedding')
self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')
# declare the LSTM itself
self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
# declare the variables to be used to embed the image feature embedding to the word embedding space
self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')
# declare the variables to go from an LSTM output to a word encoding output
self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_z], -0.1, 0.1), name='word_encoding')
# initialize this bias variable from the preProBuildWordVocab output
# optional initialization setter for encoding bias variable
if init_b is not None:
self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
else:
self.word_encoding_bias = tf.Variable(tf.zeros([self.n_z]), name='word_encoding_bias')
self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
self.all_encoding_weights=[self.embw,self.embb]
def build_model(self):
# declaring the placeholders for our extracted image feature vectors, our caption, and our mask
# (describes how long our caption is with an array of 0/1 values of length `maxlen`
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
self.img=img
caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
self.caption_placeholder=caption_placeholder
mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
self.mask=mask
self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
network_weights = self._initialize_weights()
# getting an initial LSTM embedding from our image_imbedding
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
flat_caption_placeholder=tf.reshape(caption_placeholder,[-1])
#leverage one-hot sparsity to lookup embeddings fast
embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True)
KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
KLD_loss=tf.reduce_sum(KLD_loss)*0
word_embeddings=tf.matmul(embedded_input,self.word_embedding)+self.embedding_bias
word_embeddings=tf.reshape(word_embeddings,[self.batch_size,self.n_lstm_steps,-1])
#initialize lstm state
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
rnn_output=[]
with tf.variable_scope("RNN"):
# unroll lstm
for i in range(self.n_lstm_steps):
if i > 0:
# if this isn’t the first iteration of our LSTM we need to get the word_embedding corresponding
# to the (i-1)th word in our caption
current_embedding = word_embeddings[:,i-1,:]
else:
#if this is the first iteration of our LSTM we utilize the embedded image as our input
current_embedding = image_embedding
if i > 0:
# allows us to reuse the LSTM tensor variable on each iteration
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
# if i>0:
rnn_output.append(tf.expand_dims(out,1))
#perform classification of output
rnn_output=tf.concat(rnn_output,axis=1)
rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
encoded_output=tf.reshape(tf.square(encoded_output),[self.batch_size*self.n_lstm_steps,-1])[:,1:]
#get loss
# normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
# normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
# cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
# cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
# cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
# cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
# cos_sim=cos_sim/tf.reduce_sum(mask[:,1:])
# self.exp_loss=tf.reduce_sum((-cos_sim))
# # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
# total_loss = tf.reduce_sum(-(cos_sim))
# mse=tf.reduce_sum(tf.reshape(tf.square(encoded_output-embedded_input),[self.batch_size*self.n_lstm_steps,1]),axis=-1)[:,1:]*(mask[:,1:])
# mse=tf.reduce_sum(mse)/tf.reduce_sum(mask[:,1:])
with tf.variable_scope('D',reuse=True) as scope:
total_loss=tf.reduce_mean(-tf.log(self.discriminator.discriminate(encoded_output,train=False)))
self.D2=self.discriminator.discriminate(tf.stop_gradient(encoded_output),train=True)
#average over timeseries length
# total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
self.print_loss=total_loss
total_loss+=KLD_loss/tf.reduce_sum(mask)
return total_loss, img, caption_placeholder, mask
def build_generator(self, maxlen, batchsize=1,from_image=False):
#same setup as `build_model` function
img = self.img
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(batchsize,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(image_embedding, state)
previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(maxlen):
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(previous_word, state)
# get a get maximum probability word and it's encoding from the output of the LSTM
logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
best_word = tf.argmax(logit, 1)
with tf.device("/cpu:0"):
# get the embedding of the best_word to use as input to the next iteration of our LSTM
previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
previous_word += self.embedding_bias
all_words.append(best_word)
self.img=img
self.all_words=all_words
return img, all_words
def _initialize_weights(self):
all_weights = dict()
trainability=False
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
with tf.device('/cpu:0'):
om=tf.Variable(xavier_init(self.n_z, self.n_z),name='out_mean',trainable=trainability)
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': om,
'out_log_sigma': tf.Variable(xavier_init(self.n_input, self.n_z),name='out_log_sigma',trainable=trainability)}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': om}
# self.no_reload+=all_weights['input_meaning'].values()
# self.var_embs=[]
# if transfertype2:
# self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
# self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
# if lstm_stack>1:
# self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
# all_weights['LSTM'] = {
# 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
# 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
# 'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
# 'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
# 'lstm': self.lstm}
all_encoding_weights=[all_weights[x].values() for x in all_weights]
for w in all_encoding_weights:
self.all_encoding_weights+=w
return all_weights
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
# x=tf.matmul(x,self.embw)+self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
embedding=tf.matmul(z,self.word_embedding)+self.embedding_bias
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if not vanilla:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print(logvar.shape, epsilon.shape, std.shape, z.shape, KLD.shape)
return z,KLD
def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
#image preprocessing to crop and resize image
image = (x)
if from_path==True:
image=cv2.imread(image)
if as_float:
image = image.astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
# parses image from file path and crops/resizes
if path is None:
path=test_image_path
img = self.crop_image(path, target_height=224, target_width=224)
if img.shape[2] == 4:
img = img[:,:,:3]
img = img[None, ...]
return img
def get_caption(self,x=None):
#gets caption from an image by feeding it through imported VGG16 graph
if self.from_image:
feat = self.read_image(x)
fc7 = self.sess.run(self.sess.graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
else:
fc7=np.load(x,'r')
generated_word_index= self.sess.run(self.all_words, feed_dict={self.img:fc7})
generated_word_index = np.hstack(generated_word_index)
generated_words = [ixtoword[x] for x in generated_word_index]
punctuation = np.argmax(np.array(generated_words) == '.')+1
generated_words = generated_words[:punctuation]
generated_sentence = ' '.join(generated_words)
return (generated_sentence)
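# Illustrative usage (hypothetical; assumes a trained checkpoint has been
# restored into the instance's session, build_generator() has been called so
# that self.all_words exists, and the module-level ixtoword mapping is loaded):
#
#     gen = Caption_Generator(dim_in, dim_embed, dim_hidden, 1, maxlen + 2,
#                             n_words, n_input=n_input,
#                             n_lstm_input=n_lstm_input, n_z=n_z,
#                             from_image=True)
#     gen.build_generator(maxlen=maxlen)
#     print(gen.get_caption('./data/acoustic-guitar-player.jpg'))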
def get_data(annotation_path, feature_path):
#load training/validation data
annotations = pd.read_table(annotation_path, sep='\t', header=None, names=['image', 'caption'])
return np.load(feature_path,'r'), annotations['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andre Karpathy's NeuralTalk
#process and vectorize training/validation captions
print('preprocessing %d word vocab' % (word_count_threshold, ))
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent.lower().split(' '):
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print('preprocessed words %d -> %d' % (len(word_counts), len(vocab)))
ixtoword = {}
ixtoword[0] = '.'
wordtoix = {}
wordtoix['#START#'] = 0
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector)
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector)
return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
dim_embed = 256
dim_hidden = 256
dim_in = 4096
batch_size = 128
momentum = 0.9
n_epochs = 25
def train(learning_rate=0.001, continue_training=False):
tf.reset_default_graph()
feats, captions = get_data(annotation_path, feature_path)
wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
np.save('data/ixtoword', ixtoword)
print ('num words:',len(ixtoword))
sess = tf.InteractiveSession()
n_words = len(wordtoix)
maxlen = 30
X, final_captions, mask, _map = load_text(2**19-3,captions)
running_decay=1
decay_rate=0.9999302192204246
# with tf.device('/gpu:0'):
with tf.variable_scope('D',reuse=True):
D=discriminators.DLSTM(maxlen+2,2,256,512,n_z)
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_lstm_input).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z,discriminator=D)
loss, image, sentence, mask = caption_generator.build_model()
saver = tf.train.Saver(max_to_keep=100)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
tf.global_variables_initializer().run()
tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('./models/tensorflow'))
if continue_training:
saver.restore(sess,tf.train.latest_checkpoint(model_path))
losses=[]
for epoch in range(n_epochs):
if epoch==1:
for w in caption_generator.all_encoding_weights:
w.trainable=True
index = (np.arange(len(feats)).astype(int))
np.random.shuffle(index)
index=index[:]
i=0
for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
#format data batch
current_feats = feats[index[start:end]]
current_captions = captions[index[start:end]]
current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
current_capts=final_captions[index[start:end]]
for ind, row in enumerate(current_mask_matrix):
row[:nonzeros[ind]] = 1
_, loss_value,total_loss = sess.run([train_op, caption_generator.print_loss,loss], feed_dict={
image: current_feats.astype(np.float32),
caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
mask : current_mask_matrix.astype(np.float32),
sentence : current_capts.astype(np.float32)
})
print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
losses.append(loss_value*running_decay)
if epoch<9:
if i%3==0:
running_decay*=decay_rate
else:
if i%8==0:
running_decay*=decay_rate
i+=1
print(losses[-1])
print("Saving the model from epoch: ", epoch)
pkl.dump(losses,open('losses/loss_e2e.pkl','wb'))
saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
feats, captions = get_data(annotation_path, feature_path)
feat = np.array([feats[idx]])
saver = tf.train.Saver()
sanity_check= False
# sanity_check=True
if not sanity_check:
saved_path=tf.train.latest_checkpoint(model_path)
saver.restore(sess, saved_path)
else:
tf.global_variables_initializer().run()
generated_word_index= sess.run(generated_words, feed_dict={image:feat})
generated_word_index = np.hstack(generated_word_index)
generated_sentence = [ixtoword[x] for x in generated_word_index]
print(generated_sentence)
if __name__=='__main__':
model_path = './models/tensorflow_e2e_adv'
feature_path = './data/feats.npy'
annotation_path = './data/results_20130124.token'
import sys
feats, captions = get_data(annotation_path, feature_path)
n_input=19
binary_dim=n_input
n_lstm_input=512
n_z=512//2
zero_end_tok=True
form2=True
vanilla=True
onehot=False
same_embedding=False
if sys.argv[1]=='train':
train()
elif sys.argv[1]=='test':
ixtoword = np.load('data/ixtoword.npy').tolist()
n_words = len(ixtoword)
maxlen=15
sess = tf.InteractiveSession()
batch_size=1
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
image, generated_words = caption_generator.build_generator(maxlen=maxlen)
test(sess,image,generated_words,ixtoword,1) | mit |
sanjanalab/GUIDES | static/data/pre_processed/precompute_exon_expression.py | 2 | 2146 | # Goal: To precompute gene expression information for each exon using cPickle for easy access later.
# NO MORE PD DATAFRAMES. They are slow. Let's optimize on C.
import pickle
import msgpack
import json
import math
import os
df_normalized = pickle.load(open('pd_by_tissue_normalized.p', "rb"))
tissues = df_normalized.columns[1:]
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene in genes_list:
  exon = 0
  df_gene = df_normalized[df_normalized.Id.str.contains(gene["ensembl_id"] + '_' + str(exon))]
  # We're done here if this gene has no exon-level expression rows.
  if df_gene.shape[0] == 0:
    continue
  # Make an associative array from tissues -> expression values
  expression = {}
  exon_row = df_gene.iloc[0]
  for t in tissues:
    expression_val = exon_row[t]
    if math.isnan(expression_val):
      expression_val = 0
    expression[t] = expression_val
  outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
  output_path = os.path.join('../exon_expression_msgpack/', outfile_name)
  with open(output_path, 'wb') as outfile:
    msgpack.dump(expression, outfile)
  # seq = gene_exon_file(gene["ensembl_id"], exon)  # helper defined elsewhere in the repo
import time
i = 0
def test_pandas_timing():
t0 = time.time()
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
for gene in genes_list[:100]:
df_gene = df_normalized[df_normalized.Id.str.contains(gene['ensembl_id'] + '_0')]
print(time.time() - t0)
print((time.time() - t0) / 100)
def test_guides_timing():
t0 = time.time()
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
for gene in genes_list[:100]:
dfff_gene = getGuides(gene['ensembl_id'] + '_0')
print(time.time() - t0)
print((time.time() - t0) / 100)
def getGuides(gene_exon):
try:
filename = gene_exon + ".p"
path = os.path.join('../GRCh37_guides_cpickle/', filename)
with open(path, 'rb') as datafile:
gRNAs = pickle.load(datafile)
return gRNAs
except IOError:
pass
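# Illustrative read-back of one of the msgpack files written above (the
# gene id below is a placeholder; adjust the path to your layout):
#
#     with open('../exon_expression_msgpack/ENSGXXXXXXXXXXX.X_0.p', 'rb') as f:
#         expression = msgpack.load(f)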
| bsd-3-clause |
giuliavezzani/giuliavezzani.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about what it accepts as a valid string, so we replace single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes the raw files a bit less readable, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), simply remove or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
shuangshuangwang/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 1 | 20804 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF in
        # groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v)).sort(['sum(v)', 'plus_two(id)']))
expected7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum(df.v)).sort(['sum(v)', 'plus_two(id)']))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, sum=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register('max_udf', max_udf)
with self.tempView("table"):
df.createTempView('table')
agg1 = df.agg(max_udf(df['id']))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf('float', PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([
Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)
])
agg = df.groupBy('id').agg(mean('foo').alias("mean"))
filtered = agg.filter(agg['mean'] > 40.0)
assert(filtered.collect()[0]["mean"] == 42.0)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
wanghaven/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 50 | 7221 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
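  # To make the expected LPF output format concrete, a hypothetical line from each
  # input file (not taken from any real run) is a space-separated list of integers
  # whose first entry is the size of the space and whose remaining entries are the
  # indices of the active cells/bits, e.g.
  #   spBUOut row:     "2048 13 97 410 1999"  (first value = size of the SP)
  #   sensorBUOut row: "512 4 5 6 131"        (first value = size of the encoding space)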
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
  if len(sys.argv)<2: # An input dataset must be specified
    print ('Input files required. Read documentation for details.')
  else:
    dataset = sys.argv[1]
    activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
    encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
    activeCoincsFile=open(activeCoincsPath, 'r')
    encodingsFile=open(encodingsPath, 'r')
    analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| agpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/tests/test_dviread.py | 15 | 1788 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| bsd-2-clause |
deeplook/bokeh | bokeh/sampledata/gapminder.py | 41 | 2655 | from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides pandas DataFrame instances for four
of the datasets from gapminder.org.
They are read in from csv files that have been downloaded from Bokeh's
sample data on S3. The original code that generated the csvs from the
raw gapminder data is included at the bottom of this file.
'''
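# A minimal usage sketch (it assumes bokeh.sampledata.download() has already been
# executed so that the gapminder csv files exist locally):
#
#     from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
#     fertility.head()  # each dataset is a pandas DataFrame indexed by Country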
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
    # Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
| bsd-3-clause |
UCL/sfdata_wrangler | sfdata_wrangler/Utils.py | 2 | 1634 | __author__ = "Gregory D. Erhardt"
__copyright__ = "Copyright 2013 SFCTA"
__license__ = """
This file is part of sfdata_wrangler.
sfdata_wrangler is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
sfdata_wrangler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with sfdata_wrangler. If not, see <http://www.gnu.org/licenses/>.
"""
import pandas as pd
import numpy as np
def cleanCrosstab(rows, cols, values, aggfunc=sum, weight=None):
"""
Performs a crosstab on the rows, cols and values specified.
    In the end, if there are no observations the value is zero, but if there
    are observations with an np.nan value, those remain as missing values.
    Also adds proper row and column totals.
"""
if weight is None:
t = pd.crosstab(rows, cols, values, aggfunc=aggfunc, dropna=False)
else:
t = pd.crosstab(rows, cols, values*weight, aggfunc=aggfunc, dropna=False)
count = pd.crosstab(rows, cols, dropna=False)
t = t.mask(count==0, other=0)
t['Total'] = t.sum(axis=1)
t = t.append(pd.Series(t.sum(axis=0), name='Total'))
return t
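# A minimal usage sketch for cleanCrosstab (the data below is hypothetical, not
# taken from any real survey):
#
#     df = pd.DataFrame({'mode':   ['bus', 'bus', 'rail'],
#                        'period': ['AM',  'PM',  'AM'],
#                        'trips':  [10.0,  20.0,  5.0]})
#     tab = cleanCrosstab(df['mode'], df['period'], df['trips'])
#
# 'tab' is the mode-by-period table of summed trips, with a 'Total' column and
# a 'Total' row appended; cells with no observations come out as 0.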
| gpl-3.0 |
prtx/Fast-Fourier-Transform-FFT-in-Notation-Recognition | fft.py | 1 | 1261 | import cmath
from scipy.io.wavfile import read
from matplotlib import pyplot
from pylab import specgram
from math import log
import numpy as np
note_list = ['A','A#','B','C','C#','D','D#','E','F','F#','G','G#',]
def freq_2_note(freq):
SEMITONE = 1.059463
try:
interval = int(round(log(freq/440.0, SEMITONE))) % 12
return note_list[interval]
except Exception:
pass
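# A quick hand-worked sanity check of the mapping above (values rounded, not a
# unit test):
#   freq_2_note(440.0)  -> log(1.0, SEMITONE) = 0,  note_list[0] = 'A'
#   freq_2_note(466.16) -> ~1 semitone above A440,  note_list[1] = 'A#'
#   freq_2_note(880.0)  -> 12 semitones (one octave) % 12 = 0, so 'A' again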
def omega(p, q):
return cmath.exp((2.0 * cmath.pi * 1j * q) / p)
def fft(signal):
    # Recursive radix-2 Cooley-Tukey FFT; len(signal) must be a power of two.
    n = len(signal)
    if n == 1:
        return signal
    else:
        # Transform the even- and odd-indexed halves separately...
        Feven = fft([signal[i] for i in xrange(0, n, 2)])
        Fodd = fft([signal[i] for i in xrange(1, n, 2)])
        # ...then combine them using the twiddle factors omega(n, -m).
        combined = [0] * n
        for m in xrange(n/2):
            combined[m] = Feven[m] + omega(n, -m) * Fodd[m]
            combined[m + n/2] = Feven[m] - omega(n, -m) * Fodd[m]
        return combined
def main():
frame_rate, amplitude = read('G.wav')
if type(amplitude[0]) == np.ndarray:
amplitude = (amplitude[:,0] + amplitude[:,1])/2
frequencies = [ freq_2_note(abs(freq)) for freq in fft(amplitude)]
note_count = []
for note in note_list:
note_count.append(frequencies.count(note))
print 'Notation is', note_list[note_count.index(max(note_count))]
if __name__ == '__main__':
main()
| mit |
hpfem/hermes2d | python/hermes2d/plot.py | 4 | 11213 | from hermes2d import Linearizer, Solution
def sln2png(sln, filename):
"""
Creates a nice png image of the Solution sln.
"""
plot_sln_mayavi(sln)
from enthought.mayavi.mlab import savefig
savefig(filename)
def plot_sln_mpl(sln, method="default", just_mesh=False, axes=None):
"""
Plots the Solution() instance sln using Linearizer() and matplotlib.
method = "default" ... creates a plot using triangles (the triangles are
not interpolated, so sometimes one can see small defects)
method = "contour" ... takes the vertices from linearizer and interpolates
them using contour and contourf (it doesn't take into account
the triangulation, so one can see the defects from the convex
hull approximation)
just_mesh ... only shows the mesh, but not the solution
"""
lin = Linearizer()
lin.process_solution(sln)
v = lin.get_vertices()
if method=="contour":
from numpy import min, max, linspace
from matplotlib.mlab import griddata
import matplotlib.pyplot as plt
x = v[:, 0]
y = v[:, 1]
z = v[:, 2]
# define grid.
xi = linspace(min(x), max(x), 100)
yi = linspace(min(y), max(y), 100)
# grid the data.
zi = griddata(x, y, z, xi, yi)
# contour the gridded data, plotting dots at the nonuniform data points.
CS = plt.contour(xi, yi, zi, 15, linewidths=0.5, colors='k')
CS = plt.contourf(xi, yi, zi, 15, cmap=plt.cm.jet)
plt.colorbar()
plt.title('Solution')
elif method == "default":
from numpy import array
import matplotlib.collections as collections
#import matplotlib.pyplot as plt
if axes is None:
from pylab import gca
axes = gca()
verts = []
vals = []
for t in lin.get_triangles():
triangle = tuple([tuple(v[n][:2]) for n in t])
val = sum([v[n][2] for n in t])
vals.append(val/3.)
verts.append(triangle)
verts = array(verts)
vals = array(vals)
if just_mesh:
lw = 1
else:
lw = 0
col = collections.PolyCollection(verts, linewidths=lw, antialiaseds=0)
col.set_array(vals)
#col.set_cmap(plt.cm.jet)
ax = axes
ax.add_collection(col)
ax.set_xlim(verts[:, :, 0].min(), verts[:, :, 0].max())
ax.set_ylim(verts[:, :, 1].min(), verts[:, :, 1].max())
ax.set_aspect("equal")
#plt.colorbar()
#plt.title('Solution')
else:
raise ValueError("Unknown method (%s)" % method)
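# A minimal usage sketch for plot_sln_mpl (it assumes "sln" already holds a
# computed hermes2d Solution and that pylab/matplotlib are available):
#
#     plot_sln_mpl(sln)                    # filled triangles on the current axes
#     plot_sln_mpl(sln, method="contour")  # interpolated contour/contourf plot
#     import pylab; pylab.show()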
def plot_sln_mayavi(sln, notebook=False):
"""
    Plots the Solution() instance sln using Linearizer() and mayavi.
    The vertices and triangles produced by the linearizer are passed to
    mlab.triangular_mesh(), with the solution values used as the scalars.
"""
lin = Linearizer()
lin.process_solution(sln)
vert = lin.get_vertices()
triangles = lin.get_triangles()
from numpy import zeros
from enthought.mayavi import mlab
x = vert[:, 0]
y = vert[:, 1]
z = zeros(len(y))
t = vert[:, 2]
if notebook:
# the off screen rendering properly works only with VTK-5.2 or above:
mlab.options.offscreen = True
mlab.clf()
s = mlab.triangular_mesh(x, y, z, triangles, scalars=t)
mlab.view(0, 0)
# Below is a code that does exactly what the "View along the +Z axis"
# button does:
#scene = mlab.get_engine().current_scene.scene
#scene.camera.focal_point = [0, 0, 0]
#scene.camera.position = [0, 0, 1]
#scene.camera.view_up = [0, 1, 0]
#scene.renderer.reset_camera()
#scene.render()
# the above looks ok, but there is still quite a large margin, so we prefer
# to just call .view(0, 0), which seems to be working fine.
return s
def plot_hermes_mesh_mpl(mesh, space=None, edges_only=False):
if space is None:
polynomial_orders = None
else:
polynomial_orders = mesh.get_elements_order(space)
return plot_mesh_mpl(mesh.nodes_dict, mesh.elements,
mesh.get_polygonal_boundary(),
polynomial_orders=polynomial_orders,
edges_only=edges_only)
def plot_mesh_mpl(nodes, elements, polygons=None,
polynomial_orders=None, edges_only=False):
from matplotlib import pyplot
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.patches import Rectangle
colors_old = {
1: '#000684',
2: '#3250fc',
3: '#36c4ee',
4: '#04eabc',
5: '#62ff2a',
6: '#fdff07',
7: '#ffa044',
8: '#ff1111',
9: '#b02c2c',
10: '#820f97',
}
colors = {
0: '#7f7f7f',
1: '#7f2aff',
2: '#2a2aff',
3: '#2a7fff',
4: '#00d4aa',
5: '#00aa44',
6: '#abc837',
7: '#ffd42a',
8: '#c87137',
9: '#c83737',
10: '#ff0000',
}
fig = pyplot.figure()
sp = fig.add_subplot(111)
for el_id in polygons:
x = list(polygons[el_id][:, 0])
y = list(polygons[el_id][:, 1])
x.append(x[0])
y.append(y[0])
vertices = zip(x, y)
codes = [Path.MOVETO] + [Path.LINETO]*(len(vertices)-2) + \
[Path.CLOSEPOLY]
p = Path(vertices, codes)
if edges_only:
color = "white"
linewidth = 2
else:
if polynomial_orders is None:
color = colors[0]
else:
color = colors[polynomial_orders[el_id]]
linewidth = 1
patch = PathPatch(p, facecolor=color, lw=linewidth,
edgecolor='#000000')
sp.add_patch(patch)
show_legend = polynomial_orders is not None
if show_legend:
# Create legend
def split_nodes():
x = []
y = []
if isinstance(nodes, dict):
_nodes = nodes.items()
else:
_nodes = enumerate(nodes)
for k, pnt in _nodes:
x.append(pnt[0])
y.append(pnt[1])
return (x, y)
def get_max(what='x'):
x, y = split_nodes()
if what == 'x':
return max(x)
else:
return max(y)
def get_min(what='x'):
x, y = split_nodes()
if what == 'x':
return min(x)
else:
return min(y)
maxX = get_max('x')
maxY = get_max('y')
minX = get_min('x')
minY = get_min('y')
dy = (maxY - minY) / 20
dx = (maxX - minX) / 20
y = minY + dy
x = maxX + dx
ord = polynomial_orders.items()
order_list = []
for k,v in ord:
order_list.append(v)
m = max(order_list)
for k,c in colors.items():
if k <= m :
p = Rectangle(xy=(x,y), width=dx, height=dy, fill=True, facecolor=c)
sp.add_patch(p)
sp.text(x + dx + (dx/2), y + (dy/4), str(k))
y += dy
else:
break
sp.text(x, y + (dy/2), str('Orders'))
sp.set_title("Mesh")
sp.set_aspect("equal")
sp.autoscale_view()
return sp.figure
class ScalarView(object):
def __init__(self, name="Solution", x=0, y=0, w=50, h=50):
self._name = name
self._lib = None
self._notebook = False
self._glut_view = None
def show_scale(self, *args):
pass
def show_mesh(self, *args):
pass
def wait(self):
if self._lib == "mpl" and self._notebook == False:
import pylab
pylab.show()
def show(self, sln, show=True, lib="mayavi", notebook=None,
filename="scalar.png", **options):
"""
Shows the solution.
show ... should it actually plot the window? Set to False in tests.
        lib .... which library to use for the plotting? "mpl", "mayavi" or "glut"
        notebook ... are we running inside the Sage notebook? If True, just save
                the image to a .png file given by "filename"
        filename ... the name of the file to save the image to (only used when
                notebook == True)
        Example (a rough usage sketch, assuming "sln" holds a computed Solution):
            view = ScalarView("Solution")
            view.show(sln, lib="mpl")
            view.wait()
"""
if notebook is None:
try:
from sagenb.misc.support import EMBEDDED_MODE
except ImportError:
EMBEDDED_MODE = False
notebook = EMBEDDED_MODE
self._lib = lib
self._notebook = notebook
if lib == "glut":
if self._glut_view is None:
from _hermes2d import ScalarView
self._glut_view = ScalarView(self._name)
self._glut_view.show(sln)
elif lib == "mpl":
plot_sln_mpl(sln, **options)
import pylab
if show:
if notebook:
pylab.savefig(filename)
else:
pylab.ion()
pylab.draw()
pylab.ioff()
elif lib == "mayavi":
plot_sln_mayavi(sln, notebook=notebook)
from enthought.mayavi import mlab
if show:
engine = mlab.get_engine()
image = engine.current_scene
image.scene.background = (1.0, 1.0, 1.0)
image.scene.foreground = (0.0, 0.0, 0.0)
#mlab.colorbar(orientation="vertical")
if notebook:
mlab.savefig(filename)
else:
mlab.show()
else:
raise NotImplementedError("Unknown library '%s'" % lib)
class MeshView(object):
def __init__(self, name="Solution", x=0, y=0, w=500, h=500):
self._name = name
self._x = x
self._y = y
self._w = w
self._h = h
def wait(self):
pass
def show(self, mesh, show=True, lib="mpl", notebook=None, space=None,
filename="mesh.png", **options):
if notebook is None:
try:
from sagenb.misc.support import EMBEDDED_MODE
except ImportError:
EMBEDDED_MODE = False
notebook = EMBEDDED_MODE
if lib == "glut":
from _hermes2d import MeshView
m = MeshView(self._name, self._x, self._y, self._w, self._h)
m.show(mesh)
m.wait()
elif lib == "mpl":
p = plot_hermes_mesh_mpl(mesh, space=space, **options)
if show:
if notebook:
p.savefig(filename)
else:
p.show()
import pylab
pylab.show()
return p
else:
raise NotImplementedError("Unknown library '%s'" % lib)
| gpl-2.0 |
NelisVerhoef/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape of the decision function when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
    # Check that uppercase notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
wzbozon/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
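# Note: plt.hold was deprecated and later removed in newer matplotlib releases,
# where this call can simply be dropped.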
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
junwucs/h2o-3 | h2o-py/h2o/model/model_base.py | 1 | 20203 | """
This module implements the base model class. All model classes inherit from this class.
"""
import h2o
from . import H2OFrame
from . import H2OConnection
class ModelBase(object):
def __init__(self, dest_key, model_json, metrics_class):
self._id = dest_key
self._model_json = model_json
self._metrics_class = metrics_class
self._is_xvalidated=False
self._xval_keys=None
if dest_key is not None and model_json is not None and metrics_class is not None:
# build Metric objects out of each metrics
for metric in ["training_metrics", "validation_metrics", "cross_validation_metrics"]:
if metric in model_json["output"]:
if model_json["output"][metric] is not None:
if metric=="cross_validation_metrics":
self._is_xvalidated=True
model_json["output"][metric] = metrics_class(model_json["output"][metric],metric,model_json["algo"])
if self._is_xvalidated: self._xval_keys= [i["name"] for i in model_json["output"]["cross_validation_models"]]
# build a useful dict of the params
self._params={}
for p in self._model_json["parameters"]: self._params[p["label"]]=p
@property
def id(self):
"""
:return: Retrieve this model's identifier.
"""
return self._id
@property
def params(self):
"""
Get the parameters and the actual/default values only.
:return: A dictionary of parameters used to build this model.
"""
params = {}
for p in self._params:
params[p] = {"default":self._params[p]["default_value"], "actual":self._params[p]["actual_value"]}
return params
@property
def full_parameters(self):
"""
Get the full specification of all parameters.
:return: a dictionary of parameters used to build this model.
"""
return self._params
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param test_data: Data to be predicted on.
:return: A new H2OFrame filled with predictions.
"""
if not isinstance(test_data, H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id)
prediction_frame_id = j["model_metrics"][0]["predictions"]["frame_id"]["name"]
return h2o.get_frame(prediction_frame_id)
def is_cross_validated(self):
"""
:return: True if the model was cross-validated.
"""
return self._is_xvalidated
def xval_keys(self):
"""
:return: The model keys for the cross-validated model.
"""
return self._xval_keys
def get_xval_models(self,key=None):
"""
Return a Model object.
:param key: If None, return all cross-validated models; otherwise return the model that key points to.
:return: A model or list of models.
"""
return h2o.get_model(key) if key is not None else [h2o.get_model(k) for k in self._xval_keys]
@property
def xvals(self):
"""
Return a list of the cross-validated models.
:return: A list of models
"""
return self.get_xval_models()
def deepfeatures(self, test_data, layer):
"""
Return hidden layer details
:param test_data: Data to create a feature space on
:param layer: 0 index hidden layer
"""
if test_data is None: raise ValueError("Must specify test data")
test_data._eager()
j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + test_data._id, deep_features_hidden_layer=layer)
return h2o.get_frame(j["predictions_frame"]["name"])
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix
    :param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:return: an H2OFrame which represents the weight matrix identified by matrix_id
"""
num_weight_matrices = len(self._model_json['output']['weights'])
if matrix_id not in range(num_weight_matrices):
raise ValueError("Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
"was requested.".format(num_weight_matrices, matrix_id))
return h2o.get_frame(self._model_json['output']['weights'][matrix_id]['URL'].split('/')[3])
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector
    :param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
:return: an H2OFrame which represents the bias vector identified by vector_id
"""
num_bias_vectors = len(self._model_json['output']['biases'])
if vector_id not in range(num_bias_vectors):
raise ValueError("Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
"was requested.".format(num_bias_vectors, vector_id))
return h2o.get_frame(self._model_json['output']['biases'][vector_id]['URL'].split('/')[3])
def model_performance(self, test_data=None, train=False, valid=False):
"""
Generate model metrics for this model on test_data.
    :param test_data: Data set against which model metrics shall be computed. Both train and valid arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model. If the test_data is the training data, the training metrics are returned.
    :param valid: Report the validation metrics for the model. If both train and valid are True, the training metrics are returned.
:return: An object of class H2OModelMetrics.
"""
if test_data is None:
if not train and not valid: train = True # default to train
if train: return self._model_json["output"]["training_metrics"]
if valid: return self._model_json["output"]["validation_metrics"]
else: # cases dealing with test_data not None
if not isinstance(test_data, H2OFrame):
raise ValueError("`test_data` must be of type H2OFrame. Got: " + type(test_data))
test_data._eager()
res = H2OConnection.post_json("ModelMetrics/models/" + self._id + "/frames/" + test_data._id)
# FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874
raw_metrics = None
for mm in res["model_metrics"]:
if mm["frame"]["name"] == test_data._id:
raw_metrics = mm
break
return self._metrics_class(raw_metrics,algo=self._model_json["algo"])
def score_history(self):
"""
Retrieve Model Score History
:return: the score history (H2OTwoDimTable)
"""
model = self._model_json["output"]
if 'scoring_history' in model.keys() and model["scoring_history"] != None:
s = model["scoring_history"]
if h2o.can_use_pandas():
import pandas
pandas.options.display.max_rows = 20
return pandas.DataFrame(s.cell_values,columns=s.col_header)
return model["scoring_history"]
else: print "No score history for this model"
def summary(self):
"""
Print a detailed summary of the model.
:return:
"""
model = self._model_json["output"]
if model["model_summary"]:
model["model_summary"].show() # H2OTwoDimTable object
def show(self):
"""
Print innards of model, without regards to type
:return: None
"""
model = self._model_json["output"]
print "Model Details"
print "============="
print self.__class__.__name__, ": ", self._model_json["algo_full_name"]
print "Model Key: ", self._id
self.summary()
print
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
if "scoring_history" in model.keys() and model["scoring_history"]: model["scoring_history"].show()
if "variable_importances" in model.keys() and model["variable_importances"]: model["variable_importances"].show()
def varimp(self, return_list=False):
"""
Pretty print the variable importances, or return them in a list
    :param return_list: if True, then return the variable importances in a list (ordered from most important to least
important). Each entry in the list is a 4-tuple of (variable, relative_importance, scaled_importance, percentage).
:return: None or ordered list
"""
model = self._model_json["output"]
if "variable_importances" in model.keys() and model["variable_importances"]:
if not return_list: return model["variable_importances"].show()
else: return model["variable_importances"].cell_values
else:
print "Warning: This model doesn't have variable importances"
def residual_deviance(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param train: Get the residual deviance for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_deviance() if train else self._model_json["output"]["validation_metrics"].residual_deviance()
def residual_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
    Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the residual dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the residual dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the residual dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].residual_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].residual_degrees_of_freedom()
def null_deviance(self,train=False,valid=False,xval=False):
"""
    Retrieve the null deviance if this model has the attribute, or None otherwise.
    :param train: Get the null deviance for the training set. If both train and valid are False, then train is selected by default.
    :param valid: Get the null deviance for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null deviance, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_deviance() if train else self._model_json["output"]["validation_metrics"].null_deviance()
def null_degrees_of_freedom(self,train=False,valid=False,xval=False):
"""
    Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param train: Get the null dof for the training set. If both train and valid are False, then train is selected by default.
:param valid: Get the null dof for the validation set. If both train and valid are True, then train is selected by default.
:return: Return the null dof, or None if it is not present.
"""
if xval: raise ValueError("Cross-validation metrics are not available.")
if not train and not valid: train = True
if train and valid: train = True
return self._model_json["output"]["training_metrics"].null_degrees_of_freedom() if train else self._model_json["output"]["validation_metrics"].null_degrees_of_freedom()
def pprint_coef(self):
"""
    Pretty print the coefficients table (includes normalized coefficients)
:return: None
"""
print self._model_json["output"]["coefficients_table"] # will return None if no coefs!
def coef(self):
"""
:return: Return the coefficients for this model.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[1] for a in tbl}
def coef_norm(self):
"""
:return: Return the normalized coefficients
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None: return None
tbl = tbl.cell_values
return {a[0]:a[2] for a in tbl}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be 1 - MSE/var,
where var is computed as sigma*sigma.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the R^2 value for the training data.
:param valid: If valid is True, then return the R^2 value for the validation data.
:param xval: If xval is True, then return the R^2 value for the cross validation data.
:return: The R^2 for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.r2()
return m.values()[0] if len(m) == 1 else m
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the MSE value for the training data.
:param valid: If valid is True, then return the MSE value for the validation data.
:param xval: If xval is True, then return the MSE value for the cross validation data.
:return: The MSE for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mse()
return m.values()[0] if len(m) == 1 else m
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Log Loss value for the training data.
:param valid: If valid is True, then return the Log Loss value for the validation data.
:param xval: If xval is True, then return the Log Loss value for the cross validation data.
:return: The Log Loss for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.logloss()
return m.values()[0] if len(m) == 1 else m
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviances(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Mean Residual Deviance value for the training data.
:param valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:return: The Mean Residual Deviance for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.mean_residual_deviance()
return m.values()[0] if len(m) == 1 else m
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AUC value for the training data.
:param valid: If valid is True, then return the AUC value for the validation data.
    :param xval: If xval is True, then return the AUC value for the cross validation data.
:return: The AUC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.auc()
return m.values()[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the AIC value for the training data.
:param valid: If valid is True, then return the AIC value for the validation data.
    :param xval: If xval is True, then return the AIC value for the cross validation data.
:return: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.aic()
return m.values()[0] if len(m) == 1 else m
def giniCoef(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param train: If train is True, then return the Gini Coefficient value for the training data.
:param valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:return: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.giniCoef()
return m.values()[0] if len(m) == 1 else m
def download_pojo(self,path=""):
"""
Download the POJO for this model to the directory specified by path (no trailing slash!).
If path is "", then dump to screen.
:param path: An absolute path to the directory where POJO should be saved.
:return: None
"""
h2o.download_pojo(self,path) # call the "package" function
@staticmethod
def _get_metrics(o, train, valid, xval):
metrics = {}
if train: metrics["train"] = o._model_json["output"]["training_metrics"]
if valid: metrics["valid"] = o._model_json["output"]["validation_metrics"]
if xval : metrics["xval"] = o._model_json["output"]["cross_validation_metrics"]
if len(metrics) == 0: metrics["train"] = o._model_json["output"]["training_metrics"]
return metrics
# Delete from cluster as model goes out of scope
# def __del__(self):
# h2o.remove(self._id)
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
@staticmethod
def _check_targets(y_actual, y_predicted):
"""
Check that y_actual and y_predicted have the same length.
:param y_actual: An H2OFrame
:param y_predicted: An H2OFrame
:return: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual),len(y_predicted)))
| apache-2.0 |
IamJeffG/geopandas | geopandas/tests/test_plotting.py | 1 | 10101 | from __future__ import absolute_import, division
import numpy as np
import os
import shutil
import tempfile
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib.pyplot import Artist, savefig, clf, cm, get_cmap
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib.testing.compare import compare_images
from numpy import cos, sin, pi
from shapely.geometry import Polygon, LineString, Point
from six.moves import xrange
from .util import unittest
from geopandas import GeoSeries, GeoDataFrame, read_file
# If set to True, generate images rather than perform tests (all tests will pass!)
GENERATE_BASELINE = False
BASELINE_DIR = os.path.join(os.path.dirname(__file__), 'baseline_images', 'test_plotting')
TRAVIS = bool(os.environ.get('TRAVIS', False))
class TestImageComparisons(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp()
return
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir)
return
def _compare_images(self, ax, filename, tol=10):
""" Helper method to do the comparisons """
assert isinstance(ax, Artist)
if GENERATE_BASELINE:
savefig(os.path.join(BASELINE_DIR, filename))
savefig(os.path.join(self.tempdir, filename))
err = compare_images(os.path.join(BASELINE_DIR, filename),
os.path.join(self.tempdir, filename),
tol, in_decorator=True)
if err:
raise ImageComparisonFailure('images not close: %(actual)s '
'vs. %(expected)s '
'(RMS %(rms).3f)' % err)
def test_poly_plot(self):
""" Test plotting a simple series of polygons """
clf()
filename = 'poly_plot.png'
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(1, 0), (2, 0), (2, 1)])
polys = GeoSeries([t1, t2])
ax = polys.plot()
self._compare_images(ax=ax, filename=filename)
def test_point_plot(self):
""" Test plotting a simple series of points """
clf()
filename = 'points_plot.png'
N = 10
points = GeoSeries(Point(i, i) for i in xrange(N))
ax = points.plot()
self._compare_images(ax=ax, filename=filename)
def test_line_plot(self):
""" Test plotting a simple series of lines """
clf()
filename = 'lines_plot.png'
N = 10
lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)])
ax = lines.plot()
self._compare_images(ax=ax, filename=filename)
@unittest.skipIf(TRAVIS, 'Skip on Travis (fails even though it passes locally)')
def test_plot_GeoDataFrame_with_kwargs(self):
"""
Test plotting a simple GeoDataFrame consisting of a series of polygons
with increasing values using various extra kwargs.
"""
clf()
filename = 'poly_plot_with_kwargs.png'
ts = np.linspace(0, 2*pi, 10, endpoint=False)
# Build GeoDataFrame from a series of triangles wrapping around in a ring
# and a second column containing a list of increasing values.
r1 = 1.0 # radius of inner ring boundary
r2 = 1.5 # radius of outer ring boundary
def make_triangle(t0, t1):
return Polygon([(r1*cos(t0), r1*sin(t0)),
(r2*cos(t0), r2*sin(t0)),
(r1*cos(t1), r1*sin(t1))])
polys = GeoSeries([make_triangle(t0, t1) for t0, t1 in zip(ts, ts[1:])])
values = np.arange(len(polys))
df = GeoDataFrame({'geometry': polys, 'values': values})
# Plot the GeoDataFrame using various keyword arguments to see if they are honoured
ax = df.plot(column='values', cmap=cm.RdBu, vmin=+2, vmax=None, figsize=(8, 4))
self._compare_images(ax=ax, filename=filename)
class TestPointPlotting(unittest.TestCase):
def setUp(self):
self.N = 10
self.points = GeoSeries(Point(i, i) for i in range(self.N))
values = np.arange(self.N)
self.df = GeoDataFrame({'geometry': self.points, 'values': values})
def test_default_colors(self):
## without specifying values -> max 9 different colors
# GeoSeries
ax = self.points.plot()
cmap = get_cmap('Set1', 9)
expected_colors = cmap(list(range(9))*2)
_check_colors(ax.get_lines(), expected_colors)
# GeoDataFrame -> uses 'jet' instead of 'Set1'
ax = self.df.plot()
cmap = get_cmap('jet', 9)
expected_colors = cmap(list(range(9))*2)
_check_colors(ax.get_lines(), expected_colors)
## with specifying values
ax = self.df.plot(column='values')
cmap = get_cmap('jet')
expected_colors = cmap(np.arange(self.N)/(self.N-1))
_check_colors(ax.get_lines(), expected_colors)
def test_colormap(self):
## without specifying values -> max 9 different colors
# GeoSeries
ax = self.points.plot(cmap='RdYlGn')
cmap = get_cmap('RdYlGn', 9)
expected_colors = cmap(list(range(9))*2)
_check_colors(ax.get_lines(), expected_colors)
# GeoDataFrame -> same as GeoSeries in this case
ax = self.df.plot(cmap='RdYlGn')
_check_colors(ax.get_lines(), expected_colors)
## with specifying values
ax = self.df.plot(column='values', cmap='RdYlGn')
cmap = get_cmap('RdYlGn')
expected_colors = cmap(np.arange(self.N)/(self.N-1))
_check_colors(ax.get_lines(), expected_colors)
def test_single_color(self):
ax = self.points.plot(color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
ax = self.df.plot(color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
ax = self.df.plot(column='values', color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
def test_style_kwargs(self):
# markersize
ax = self.points.plot(markersize=10)
ms = [l.get_markersize() for l in ax.get_lines()]
assert ms == [10] * self.N
ax = self.df.plot(markersize=10)
ms = [l.get_markersize() for l in ax.get_lines()]
assert ms == [10] * self.N
ax = self.df.plot(column='values', markersize=10)
ms = [l.get_markersize() for l in ax.get_lines()]
assert ms == [10] * self.N
class TestLineStringPlotting(unittest.TestCase):
def setUp(self):
self.N = 10
values = np.arange(self.N)
self.lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(self.N)])
self.df = GeoDataFrame({'geometry': self.lines, 'values': values})
def test_single_color(self):
ax = self.lines.plot(color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
ax = self.df.plot(color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
ax = self.df.plot(column='values', color='green')
_check_colors(ax.get_lines(), ['green']*self.N)
def test_style_kwargs(self):
# linestyle
ax = self.lines.plot(linestyle='dashed')
ls = [l.get_linestyle() for l in ax.get_lines()]
assert ls == ['--'] * self.N
ax = self.df.plot(linestyle='dashed')
ls = [l.get_linestyle() for l in ax.get_lines()]
assert ls == ['--'] * self.N
ax = self.df.plot(column='values', linestyle='dashed')
ls = [l.get_linestyle() for l in ax.get_lines()]
assert ls == ['--'] * self.N
class TestPolygonPlotting(unittest.TestCase):
def setUp(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(1, 0), (2, 0), (2, 1)])
self.polys = GeoSeries([t1, t2])
self.df = GeoDataFrame({'geometry': self.polys, 'values': [0, 1]})
return
def test_single_color(self):
ax = self.polys.plot(color='green')
_check_colors(ax.patches, ['green']*2, alpha=0.5)
ax = self.df.plot(color='green')
_check_colors(ax.patches, ['green']*2, alpha=0.5)
ax = self.df.plot(column='values', color='green')
_check_colors(ax.patches, ['green']*2, alpha=0.5)
def test_vmin_vmax(self):
# when vmin == vmax, all polygons should be the same color
ax = self.df.plot(column='values', categorical=True, vmin=0, vmax=0)
cmap = get_cmap('Set1', 2)
self.assertEqual(ax.patches[0].get_facecolor(), ax.patches[1].get_facecolor())
def test_facecolor(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(1, 0), (2, 0), (2, 1)])
polys = GeoSeries([t1, t2])
df = GeoDataFrame({'geometry': polys, 'values': [0, 1]})
ax = polys.plot(facecolor='k')
_check_colors(ax.patches, ['k']*2, alpha=0.5)
class TestPySALPlotting(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import pysal as ps
except ImportError:
raise unittest.SkipTest("PySAL is not installed")
pth = ps.examples.get_path("columbus.shp")
cls.tracts = read_file(pth)
def test_legend(self):
ax = self.tracts.plot(column='CRIME', scheme='QUANTILES', k=3,
cmap='OrRd', legend=True)
labels = [t.get_text() for t in ax.get_legend().get_texts()]
expected = [u'0.00 - 26.07', u'26.07 - 41.97', u'41.97 - 68.89']
self.assertEqual(labels, expected)
def _check_colors(collection, expected_colors, alpha=None):
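    # Compare the colors actually used by the artists with the expected ones;
    # both sides are converted to RGBA via matplotlib's colorConverter so that
    # named colors, hex strings and RGB(A) tuples compare uniformly.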
from matplotlib.lines import Line2D
import matplotlib.colors as colors
conv = colors.colorConverter
for patch, color in zip(collection, expected_colors):
if isinstance(patch, Line2D):
# points/lines
result = patch.get_color()
else:
# polygons
result = patch.get_facecolor()
assert conv.to_rgba(result) == conv.to_rgba(color, alpha=alpha)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/feature_selection/rfe.py | 7 | 16859 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..utils.validation import check_is_fitted
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and the importance of each feature is obtained either through a
``coef_`` attribute or through a ``feature_importances_`` attribute.
    Then, the least important features are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
# Parameter step_score controls the calculation of self.scores_
# step_score is not exposed to users
# and is used when implementing RFECV
# self.scores_ will not be calculated when calling _fit through fit
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
else:
coefs = getattr(estimator, 'feature_importances_', None)
if coefs is None:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
check_is_fitted(self, 'support_')
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If the
estimator is a classifier or if ``y`` is neither binary nor multiclass,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int, default 1
Number of cores to run in parallel while fitting across folds.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features in
    the Friedman #1 dataset when their number is not known a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0,
n_jobs=1):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
        # This branching is done to make sure that user code that sets n_jobs
        # to 1 and provides bound methods as scorers is not broken with the
        # addition of the n_jobs parameter in version 0.18.
if self.n_jobs == 1:
parallel, func = list, _rfe_single_fit
else:
parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y))
scores = np.sum(scores, axis=0)
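        # scores[i] is the summed CV score obtained with the i-th candidate
        # subset size, ordered from all features down to one, so the best
        # number of features corresponds to the argmax below.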
n_features_to_select = max(
n_features - (np.argmax(scores) * step),
n_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y)
return self
| mit |
jasonleaster/Machine_Learning | Adaboost/adaboost.py | 1 | 3667 | """
Programmer : EOF
Cooperator : Wei Chen.
Date : 2015.11.22
File : adaboost.py
File Description:
    AdaBoost is a machine learning meta-algorithm; the name is short for
    "Adaptive Boosting".
    Thanks to Wei Chen. Without him, I could not have understood AdaBoost
    in such a short time. We helped each other and learned this algorithm.
"""
import numpy
from decisionStump import *
import matplotlib.pyplot as pyplot
class AdaBoost:
def __init__(self, Mat, Tag, WeakerClassifier = DecisionStump):
"""
        self._Mat: A matrix which stores the samples. Every column
                   vector in this matrix is one sample point.
        self._Tag: A vector with the +1/-1 label of every sample.
        self.W:    A vector which holds the weight of every sample.
        self.N:    The number of weak classifiers that is enough for
                   the solution.
"""
self._Mat = numpy.array(Mat) * 1.0
self._Tag = numpy.array(Tag) * 1.0
self.SamplesDem = self._Mat.shape[0]
self.SamplesNum = self._Mat.shape[1]
        # Make sure that the dimensions of the input data are right.
assert self.SamplesNum == self._Tag.size
self.Weaker = WeakerClassifier
# Initialization of weight
self.W = [1.0/self.SamplesNum for i in range(self.SamplesNum)]
self.N = 0
self.G = {}
self.alpha = {}
self.accuracy = []
def is_good_enough(self):
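        # Form the weighted majority vote of the weak classifiers trained so
        # far and record the current training accuracy for the learning curve.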
output = numpy.zeros((self.SamplesNum, 1))
for i in range(self.N+1):
output += self.G[i].prediction(self._Mat) * self.alpha[i]
output = numpy.sign(output)
output = output.flatten()
e = numpy.count_nonzero(output ==self._Tag)/(self.SamplesNum*1.)
self.accuracy.append( e )
if output.tolist() == self._Tag.tolist():
return True
else:
return False
def train(self, M = 4):
"""
        function @train() is the main process which runs the
        AdaBoost algorithm.
        @M : Upper bound on the number of weak classifiers that
             will be used to construct the strong classifier.
"""
for m in range(M):
self.G[m] = self.Weaker(self._Mat, self._Tag, self.W)
self.G[m].train()
errorRate = self.G[m].opt_errorRate
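            # Weight of this weak classifier: alpha_m = 0.5 * ln((1 - e_m) / e_m),
            # where e_m is its weighted error rate; the smaller the error, the
            # larger its vote in the final strong classifier.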
self.alpha[m] = 0.5 * numpy.log((1-errorRate)/errorRate)
output = self.G[m].prediction(self._Mat)
if self.is_good_enough():
                print (self.N + 1), " weak classifiers are enough to ",\
                      "classify the input sample points"
print "Training Done :)"
break
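            # Re-weight the samples: misclassified points (Tag[i] * output[i] < 0)
            # get exponentially larger weights while correctly classified points
            # get smaller ones; Z normalizes the weights so they sum to 1.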
Z = 0.0
for i in range(self.SamplesNum):
Z += self.W[i] * numpy.exp(-self.alpha[m] * self._Tag[i] * output[i])
for i in range(self.SamplesNum):
self.W[i] = (self.W[i] / Z) * numpy.exp(-self.alpha[m] * self._Tag[i] * output[i])
self.N += 1
if self.accuracy[self.N-1] > 0.97 or self.N > 100:
self.showErrRates()
return
def prediction(self, Mat):
Mat = numpy.array(Mat)
output = numpy.zeros((Mat.shape[1], 1))
for i in range(self.N + 1):
output += self.G[i].prediction(Mat) * self.alpha[i]
#print output
output = numpy.sign(output)
return output
def showErrRates(self):
pyplot.title("The changes of accuracy (Figure by Jason Leaster)")
pyplot.xlabel("Iteration times")
pyplot.ylabel("Accuracy of Prediction")
pyplot.plot([i for i in range(self.N)], self.accuracy, '-.', label = "Accuracy * 100%")
pyplot.show()
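# A minimal usage sketch (illustration only, not part of the original file); the
# data below are hypothetical and it is assumed that the DecisionStump class
# imported above behaves as it is used in this module. Each column of Mat is
# one sample point and Tag holds the corresponding +1/-1 labels.
#
#   Mat = [[1., 2., 3., 4., 5., 6., 7., 8.],
#          [8., 7., 6., 5., 4., 3., 2., 1.]]
#   Tag = [+1, +1, -1, +1, -1, -1, +1, -1]
#   booster = AdaBoost(Mat, Tag)
#   booster.train(M = 10)
#   print booster.prediction(Mat)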
| gpl-2.0 |
rcrowder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/scale.py | 69 | 13414 | import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
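# Note on the pre-scaling in the transforms above: Log10Transform maps a to
# log10(10 * a) = log10(a) + 1 and InvertedLog10Transform maps a to
# 10**a / 10, so composing the two gives the identity for positive input.
# The base-2, natural-log and arbitrary-base pairs follow the same pattern.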
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
return SymmetricalLogScale.SymmetricalLogTransform(self.base, self.linthresh)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
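# Illustrative use from the Axes API (keyword names follow the __init__
# docstring above; shown as a sketch only):
#     ax.set_yscale('symlog', basey=10, linthreshy=2.0)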
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale instance by name, constructed for the given axis.
ACCEPTS: [ %(names)s ]
"""
if scale is None:
    scale = 'linear'
scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
| agpl-3.0 |
NonVolatileComputing/arrow | python/pyarrow/tests/test_io.py | 1 | 10283 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from io import BytesIO
import gc
import os
import pytest
import sys
import numpy as np
import pandas as pd
from pyarrow.compat import u, guid
import pyarrow as pa
# ----------------------------------------------------------------------
# Python file-like objects
def test_python_file_write():
buf = BytesIO()
f = pa.PythonFile(buf)
assert f.tell() == 0
s1 = b'enga\xc3\xb1ado'
s2 = b'foobar'
f.write(s1.decode('utf8'))
assert f.tell() == len(s1)
f.write(s2)
expected = s1 + s2
result = buf.getvalue()
assert result == expected
f.close()
def test_python_file_read():
data = b'some sample data'
buf = BytesIO(data)
f = pa.PythonFile(buf, mode='r')
assert f.size() == len(data)
assert f.tell() == 0
assert f.read(4) == b'some'
assert f.tell() == 4
f.seek(0)
assert f.tell() == 0
f.seek(5)
assert f.tell() == 5
v = f.read(50)
assert v == b'sample data'
assert len(v) == 11
f.close()
def test_bytes_reader():
# Like a BytesIO, but zero-copy underneath for C++ consumers
data = b'some sample data'
f = pa.BufferReader(data)
assert f.tell() == 0
assert f.size() == len(data)
assert f.read(4) == b'some'
assert f.tell() == 4
f.seek(0)
assert f.tell() == 0
f.seek(5)
assert f.tell() == 5
assert f.read(50) == b'sample data'
f.close()
def test_bytes_reader_non_bytes():
with pytest.raises(ValueError):
pa.BufferReader(u('some sample data'))
def test_bytes_reader_retains_parent_reference():
import gc
# ARROW-421
def get_buffer():
data = b'some sample data' * 1000
reader = pa.BufferReader(data)
reader.seek(5)
return reader.read_buffer(6)
buf = get_buffer()
gc.collect()
assert buf.to_pybytes() == b'sample'
assert buf.parent is not None
# ----------------------------------------------------------------------
# Buffers
def test_buffer_bytes():
val = b'some data'
buf = pa.frombuffer(val)
assert isinstance(buf, pa.Buffer)
result = buf.to_pybytes()
assert result == val
def test_buffer_memoryview():
val = b'some data'
buf = pa.frombuffer(val)
assert isinstance(buf, pa.Buffer)
result = memoryview(buf)
assert result == val
def test_buffer_bytearray():
val = bytearray(b'some data')
buf = pa.frombuffer(val)
assert isinstance(buf, pa.Buffer)
result = bytearray(buf)
assert result == val
def test_buffer_numpy():
# Make sure creating a numpy array from an arrow buffer works
byte_array = bytearray(20)
byte_array[0] = 42
buf = pa.frombuffer(byte_array)
array = np.frombuffer(buf, dtype="uint8")
assert array[0] == byte_array[0]
assert array.base == buf
def test_allocate_buffer():
buf = pa.allocate_buffer(100)
assert buf.size == 100
assert buf.is_mutable
bit = b'abcde'
writer = pa.FixedSizeBufferWriter(buf)
writer.write(bit)
assert buf.to_pybytes()[:5] == bit
def test_buffer_memoryview_is_immutable():
val = b'some data'
buf = pa.frombuffer(val)
assert isinstance(buf, pa.Buffer)
result = memoryview(buf)
with pytest.raises(TypeError) as exc:
result[0] = b'h'
assert 'cannot modify read-only' in str(exc.value)
b = bytes(buf)
with pytest.raises(TypeError) as exc:
b[0] = b'h'
assert 'cannot modify read-only' in str(exc.value)
def test_memory_output_stream():
# 10 bytes
val = b'dataabcdef'
f = pa.BufferOutputStream()
K = 1000
for i in range(K):
f.write(val)
buf = f.get_result()
assert len(buf) == len(val) * K
assert buf.to_pybytes() == val * K
def test_inmemory_write_after_closed():
f = pa.BufferOutputStream()
f.write(b'ok')
f.get_result()
with pytest.raises(IOError):
f.write(b'not ok')
def test_buffer_protocol_ref_counting():
def make_buffer(bytes_obj):
return bytearray(pa.frombuffer(bytes_obj))
buf = make_buffer(b'foo')
gc.collect()
assert buf == b'foo'
# ARROW-1053
val = b'foo'
refcount_before = sys.getrefcount(val)
for i in range(10):
make_buffer(val)
gc.collect()
assert refcount_before == sys.getrefcount(val)
def test_nativefile_write_memoryview():
f = pa.BufferOutputStream()
data = b'ok'
arr = np.frombuffer(data, dtype='S1')
f.write(arr)
f.write(bytearray(data))
buf = f.get_result()
assert buf.to_pybytes() == data * 2
# ----------------------------------------------------------------------
# Mock output stream
def test_mock_output_stream():
# Make sure that the MockOutputStream and the BufferOutputStream record the
# same size
# 10 bytes
val = b'dataabcdef'
f1 = pa.MockOutputStream()
f2 = pa.BufferOutputStream()
K = 1000
for i in range(K):
f1.write(val)
f2.write(val)
assert f1.size() == len(f2.get_result())
# Do the same test with a pandas DataFrame
val = pd.DataFrame({'a': [1, 2, 3]})
record_batch = pa.RecordBatch.from_pandas(val)
f1 = pa.MockOutputStream()
f2 = pa.BufferOutputStream()
stream_writer1 = pa.RecordBatchStreamWriter(f1, record_batch.schema)
stream_writer2 = pa.RecordBatchStreamWriter(f2, record_batch.schema)
stream_writer1.write_batch(record_batch)
stream_writer2.write_batch(record_batch)
stream_writer1.close()
stream_writer2.close()
assert f1.size() == len(f2.get_result())
# ----------------------------------------------------------------------
# OS files and memory maps
@pytest.fixture
def sample_disk_data(request, tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
def teardown():
_try_delete(path)
request.addfinalizer(teardown)
return path, data
def _check_native_file_reader(FACTORY, sample_data):
path, data = sample_data
f = FACTORY(path, mode='r')
assert f.read(10) == data[:10]
assert f.read(0) == b''
assert f.tell() == 10
assert f.read() == data[10:]
assert f.size() == len(data)
f.seek(0)
assert f.tell() == 0
# Seeking past the end of the file is allowed (no error is raised)
f.seek(len(data) + 1)
assert f.tell() == len(data) + 1
assert f.read(5) == b''
# Test whence argument of seek, ARROW-1287
assert f.seek(3) == 3
assert f.seek(3, os.SEEK_CUR) == 6
assert f.tell() == 6
ex_length = len(data) - 2
assert f.seek(-2, os.SEEK_END) == ex_length
assert f.tell() == ex_length
def test_memory_map_reader(sample_disk_data):
_check_native_file_reader(pa.memory_map, sample_disk_data)
def test_memory_map_retain_buffer_reference(sample_disk_data):
path, data = sample_disk_data
cases = []
with pa.memory_map(path, 'rb') as f:
cases.append((f.read_buffer(100), data[:100]))
cases.append((f.read_buffer(100), data[100:200]))
cases.append((f.read_buffer(100), data[200:300]))
# Call gc.collect() for good measure
gc.collect()
for buf, expected in cases:
assert buf.to_pybytes() == expected
def test_os_file_reader(sample_disk_data):
_check_native_file_reader(pa.OSFile, sample_disk_data)
def _try_delete(path):
try:
os.remove(path)
except os.error:
pass
def test_memory_map_writer(tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
f = pa.memory_map(path, mode='r+b')
f.seek(10)
f.write('peekaboo')
assert f.tell() == 18
f.seek(10)
assert f.read(8) == b'peekaboo'
f2 = pa.memory_map(path, mode='r+b')
f2.seek(10)
f2.write(b'booapeak')
f2.seek(10)
f.seek(10)
assert f.read(8) == b'booapeak'
# Does not truncate file
f3 = pa.memory_map(path, mode='w')
f3.write('foo')
with pa.memory_map(path) as f4:
assert f4.size() == SIZE
with pytest.raises(IOError):
f3.read(5)
f.seek(0)
assert f.read(3) == b'foo'
def test_os_file_writer(tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
# Truncates file
f2 = pa.OSFile(path, mode='w')
f2.write('foo')
with pa.OSFile(path) as f3:
assert f3.size() == 3
with pytest.raises(IOError):
f2.read(5)
def test_native_file_modes(tmpdir):
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(b'foooo')
with pa.OSFile(path, mode='r') as f:
assert f.mode == 'rb'
with pa.OSFile(path, mode='rb') as f:
assert f.mode == 'rb'
with pa.OSFile(path, mode='w') as f:
assert f.mode == 'wb'
with pa.OSFile(path, mode='wb') as f:
assert f.mode == 'wb'
with open(path, 'wb') as f:
f.write(b'foooo')
with pa.memory_map(path, 'r') as f:
assert f.mode == 'rb'
with pa.memory_map(path, 'r+') as f:
assert f.mode == 'rb+'
with pa.memory_map(path, 'r+b') as f:
assert f.mode == 'rb+'
| apache-2.0 |
arunlodhi/pymc3 | pymc3/backends/tracetab.py | 8 | 1818 | """Functions for converting traces into a table-like format
"""
import numpy as np
import pandas as pd
__all__ = ['trace_to_dataframe']
def trace_to_dataframe(trace, chains=None, flat_names=None):
"""Convert trace to Pandas DataFrame.
Parameters
----------
trace : NDarray trace
chains : int or list of ints
Chains to include. If None, all chains are used. A single
chain value can also be given.
flat_names : dict or None
A dictionary that maps each variable name in `trace` to a list of
flat variable names (as produced by `create_flat_names`). If None,
the names are generated from the variable shapes in the trace.
"""
var_shapes = trace._straces[0].var_shapes
if flat_names is None:
flat_names = {v: create_flat_names(v, shape)
for v, shape in var_shapes.items()}
var_dfs = []
for varname, shape in var_shapes.items():
vals = trace.get_values(varname, combine=True, chains=chains)
flat_vals = vals.reshape(vals.shape[0], -1)
var_dfs.append(pd.DataFrame(flat_vals, columns=flat_names[varname]))
return pd.concat(var_dfs, axis=1)
def create_flat_names(varname, shape):
"""Return flat variable names for `varname` of `shape`.
Examples
--------
>>> create_flat_names('x', (5,))
['x__0', 'x__1', 'x__2', 'x__3', 'x__4']
>>> create_flat_names('x', (2, 2))
['x__0_0', 'x__0_1', 'x__1_0', 'x__1_1']
"""
if not shape:
return [varname]
labels = (np.ravel(xs).tolist() for xs in np.indices(shape))
labels = (map(str, xs) for xs in labels)
return ['{}__{}'.format(varname, '_'.join(idxs)) for idxs in zip(*labels)]
def _create_shape(flat_names):
"Determine shape from `create_flat_names` output."
try:
_, shape_str = flat_names[-1].rsplit('__', 1)
except ValueError:
return ()
return tuple(int(i) + 1 for i in shape_str.split('_'))
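# Small self-contained check of the helpers above (illustration only, no
# sampling or trace object needed):
if __name__ == "__main__":
    names = create_flat_names('x', (2, 2))
    print(names)                 # ['x__0_0', 'x__0_1', 'x__1_0', 'x__1_1']
    print(_create_shape(names))  # (2, 2)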
| apache-2.0 |
charanpald/wallhack | wallhack/viroscopy/model/ProcessResults.py | 1 | 17156 | import numpy
import logging
import sys
import multiprocessing
import os
from apgl.graph.GraphStatistics import GraphStatistics
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.Util import Util
from sandbox.util.Latex import Latex
from sandbox.util.FileLock import FileLock
from sandbox.predictors.ABCSMC import loadThetaArray
from wallhack.viroscopy.model.HIVModelUtils import HIVModelUtils
assert False, "Must run with -O flag"
FORMAT = "%(levelname)s:root:%(process)d:%(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMAT)
numpy.set_printoptions(suppress=True, precision=4, linewidth=150)
processReal = False
saveResults = False
def loadParams(ind):
if processReal:
resultsDir = PathDefaults.getOutputDir() + "viroscopy/real/theta" + str(ind) + "/"
outputDir = resultsDir + "stats/"
N, matchAlpha, breakScale, numEpsilons, epsilon, minEpsilon, matchAlg, abcMaxRuns, batchSize, pertScale = HIVModelUtils.realABCParams(True)
startDate, endDate, recordStep, M, targetGraph, numInds = HIVModelUtils.realSimulationParams(test=True, ind=ind)
realTheta, sigmaTheta, pertTheta = HIVModelUtils.estimatedRealTheta(ind)
numInds=2
prefix = "Real"
else:
resultsDir = PathDefaults.getOutputDir() + "viroscopy/toy/theta/"
outputDir = resultsDir + "stats/"
N, matchAlpha, breakScale, numEpsilons, epsilon, minEpsilon, matchAlg, abcMaxRuns, batchSize, pertScale = HIVModelUtils.toyABCParams()
startDate, endDate, recordStep, M, targetGraph = HIVModelUtils.toySimulationParams(test=True)
realTheta, sigmaTheta, pertTheta = HIVModelUtils.toyTheta()
prefix = "Toy"
numInds = 1
breakSize = (targetGraph.subgraph(targetGraph.removedIndsAt(endDate)).size - targetGraph.subgraph(targetGraph.removedIndsAt(startDate)).size) * breakScale
return N, resultsDir, outputDir, recordStep, startDate, endDate, prefix, targetGraph, breakSize, numEpsilons, M, matchAlpha, matchAlg, numInds
def saveStats(args):
i, theta = args
resultsFileName = outputDir + "SimStats" + str(i) + ".pkl"
lock = FileLock(resultsFileName)
if not lock.fileExists() and not lock.isLocked():
lock.lock()
model = HIVModelUtils.createModel(targetGraph, startDate, endDate, recordStep, M, matchAlpha, breakSize, matchAlg, theta=thetaArray[i])
times, infectedIndices, removedIndices, graph, compTimes, graphMetrics = HIVModelUtils.simulate(model)
times = numpy.arange(startDate, endDate+1, recordStep)
vertexArray, infectedIndices, removedIndices, contactGraphStats, removedGraphStats, finalRemovedDegrees = HIVModelUtils.generateStatistics(graph, times)
stats = times, vertexArray, infectedIndices, removedGraphStats, finalRemovedDegrees, graphMetrics.objectives, compTimes
Util.savePickle(stats, resultsFileName)
lock.unlock()
else:
logging.debug("Results already computed: " + str(resultsFileName))
N, resultsDir, outputDir, recordStep, startDate, endDate, prefix, targetGraph, breakSize, numEpsilons, M, matchAlpha, matchAlg, numInds = loadParams(0)
if saveResults:
for ind in range(numInds):
logging.debug("Record step: " + str(recordStep))
logging.debug("Start date: " + str(startDate))
logging.debug("End date: " + str(endDate))
logging.debug("End date - start date: " + str(endDate - startDate))
N, resultsDir, outputDir, recordStep, startDate, endDate, prefix, targetGraph, breakSize, numEpsilons, M, matchAlpha, matchAlg, numInds = loadParams(ind)
t = 0
for i in range(numEpsilons):
thetaArray, objArray = loadThetaArray(N, resultsDir, i)
if thetaArray.shape[0] == N:
t = i
logging.debug("Using population " + str(t))
try:
os.mkdir(outputDir)
except:
logging.debug("Directory exists: " + outputDir)
thetaArray = loadThetaArray(N, resultsDir, t)[0]
logging.debug(thetaArray)
paramList = []
for i in range(thetaArray.shape[0]):
paramList.append((i, thetaArray[i, :]))
pool = multiprocessing.Pool(multiprocessing.cpu_count())
resultIterator = pool.map(saveStats, paramList)
#resultIterator = map(saveStats, paramList)
pool.terminate()
#Now save the statistics on the target graph
times = numpy.arange(startDate, endDate+1, recordStep)
vertexArray, infectedIndices, removedIndices, contactGraphStats, removedGraphStats, finalRemovedDegrees = HIVModelUtils.generateStatistics(targetGraph, times)
stats = vertexArray, infectedIndices, removedIndices, contactGraphStats, removedGraphStats, finalRemovedDegrees
resultsFileName = outputDir + "IdealStats.pkl"
Util.savePickle(stats, resultsFileName)
else:
import matplotlib
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
plotStyles = ['k-', 'kx-', 'k+-', 'k.-', 'k*-']
N, resultsDir, outputDir, recordStep, startDate, endDate, prefix, targetGraph, breakSize, numEpsilons, M, matchAlpha, matchAlg, numInds = loadParams(0)
inds = range(numInds)
numRecordSteps = int((endDate-startDate)/recordStep)+1
#We store: number of detections, CT detections, rand detections, infectives, max component size, num components, edges, objectives
numMeasures = 12
numTimings = 2
thetas = []
measures = numpy.zeros((len(inds), numMeasures, N, numRecordSteps))
idealMeasures = numpy.zeros((len(inds), numMeasures, numRecordSteps))
timings = numpy.zeros((len(inds), numTimings, N))
numDegrees = 20
degreeDists = numpy.zeros((len(inds), numDegrees, N))
idealDegreeDists = numpy.zeros((len(inds), numDegrees))
#Note all the inds
numDetectsInd = 0
maleInd = 1
femaleInd = 2
heteroInd = 3
biInd = 4
randDetectInd = 5
contactDetectInd = 6
infectedInd = 7
numCompsInd = 8
maxCompSizeInd = 9
numEdgesInd = 10
objsInd = 11
plotInd = 0
if processReal:
timeInds = [5, 6]
else:
timeInds = [10, 11, 12, 13]
for ind in inds:
logging.debug("ind=" + str(ind))
N, resultsDir, outputDir, recordStep, startDate, endDate, prefix, targetGraph, breakSize, numEpsilons, M, matchAlpha, matchAlg, numInds = loadParams(ind)
#Find the max number t for which we have a complete set of particles
t = 0
for i in range(numEpsilons):
thetaArray, objArray = loadThetaArray(N, resultsDir, i)
if thetaArray.shape[0] == N:
t = i
logging.debug("Using particle number " + str(t))
times = numpy.arange(startDate, endDate+1, recordStep)
realTheta, sigmaTheta, purtTheta = HIVModelUtils.toyTheta()
thetaArray, objArray = loadThetaArray(N, resultsDir, t)
thetas.append(thetaArray)
print(thetaArray)
resultsFileName = outputDir + "IdealStats.pkl"
stats = Util.loadPickle(resultsFileName)
vertexArrayIdeal, idealInfectedIndices, idealRemovedIndices, idealContactGraphStats, idealRemovedGraphStats, idealFinalRemovedDegrees = stats
graphStats = GraphStatistics()
idealMeasures[ind, numDetectsInd, :] = vertexArrayIdeal[:, numDetectsInd]
idealMeasures[ind, maleInd, :] = vertexArrayIdeal[:, maleInd]
idealMeasures[ind, femaleInd, :] = vertexArrayIdeal[:, femaleInd]
idealMeasures[ind, heteroInd, :] = vertexArrayIdeal[:, heteroInd]
idealMeasures[ind, biInd, :] = vertexArrayIdeal[:, biInd]
idealMeasures[ind, randDetectInd, :] = vertexArrayIdeal[:, randDetectInd]
idealMeasures[ind, contactDetectInd, :] = vertexArrayIdeal[:, contactDetectInd]
idealMeasures[ind, numCompsInd, :] = idealRemovedGraphStats[:, graphStats.numComponentsIndex]
idealMeasures[ind, maxCompSizeInd, :] = idealRemovedGraphStats[:, graphStats.maxComponentSizeIndex]
idealMeasures[ind, numEdgesInd, :] = idealRemovedGraphStats[:, graphStats.numEdgesIndex]
maxDegrees = min(idealFinalRemovedDegrees.shape[0], numDegrees)
idealDegreeDists[ind, 0:maxDegrees] = idealFinalRemovedDegrees[0:maxDegrees]
for i in range(thetaArray.shape[0]):
resultsFileName = outputDir + "SimStats" + str(i) + ".pkl"
stats = Util.loadPickle(resultsFileName)
times, vertexArray, infectedIndices, removedGraphStats, finalRemovedDegrees, objs, compTimes = stats
measures[ind, numDetectsInd, i, :] = vertexArray[:, numDetectsInd]
measures[ind, maleInd, i, :] = vertexArray[:, maleInd]
measures[ind, femaleInd, i, :] = vertexArray[:, femaleInd]
measures[ind, heteroInd, i, :] = vertexArray[:, heteroInd]
measures[ind, biInd, i, :] = vertexArray[:, biInd]
measures[ind, randDetectInd, i, :] = vertexArray[:, randDetectInd]
measures[ind, contactDetectInd, i, :] = vertexArray[:, contactDetectInd]
measures[ind, infectedInd, i, :] = numpy.array([len(x) for x in infectedIndices])
measures[ind, numCompsInd, i, :] = removedGraphStats[:, graphStats.numComponentsIndex]
measures[ind, maxCompSizeInd, i, :] = removedGraphStats[:, graphStats.maxComponentSizeIndex]
measures[ind, numEdgesInd, i, :] = removedGraphStats[:, graphStats.numEdgesIndex]
measures[ind, objsInd, i, 1:] = objs
maxDegrees = min(finalRemovedDegrees.shape[0], numDegrees)
degreeDists[ind, 0:maxDegrees, i] = finalRemovedDegrees[0:maxDegrees]
#objectives[inds, i, :] = objs
timings[ind, :, i] = compTimes
times = times - numpy.min(times)
logging.debug("times="+str(times))
logging.debug("computational times="+str(numpy.mean(timings, 2)))
meanMeasures = numpy.mean(measures, 2)
stdMeasures = numpy.std(measures, 2)
#Infections and detections
plt.figure(plotInd)
if not processReal:
numInfects = [len(x) for x in idealInfectedIndices]
plt.errorbar(times, meanMeasures[ind, infectedInd, :], color="k", yerr=stdMeasures[ind, infectedInd, :], label="est. infectives")
plt.plot(times, numInfects, "k--", label="infectives")
plt.errorbar(times, meanMeasures[ind, numDetectsInd, :], color="r", yerr=stdMeasures[ind, numDetectsInd, :], label="est. detections")
plt.plot(times, idealMeasures[ind, numDetectsInd, :], "r--", label="detections")
plt.xlabel("time (days)")
if not processReal:
plt.ylabel("infectives/detections")
lims = plt.xlim()
plt.xlim([0, lims[1]])
filename = outputDir + prefix + "InfectDetects.eps"
else:
plt.ylabel("detections")
filename = outputDir + prefix + "Detects" + str(ind) + ".eps"
plt.legend(loc="lower right")
plt.savefig(filename)
plotInd += 1
#Gender
plt.figure(plotInd)
plt.errorbar(times, meanMeasures[ind, maleInd, :], yerr=stdMeasures[ind, maleInd, :], label="est. male")
plt.plot(times, idealMeasures[ind, maleInd, :], "r", label="male")
plt.errorbar(times, meanMeasures[ind, femaleInd, :], yerr=stdMeasures[ind, femaleInd, :], label="est. female")
plt.plot(times, idealMeasures[ind, femaleInd, :], "k", label="female")
plt.xlabel("time (days)")
plt.ylabel("detections")
plt.legend(loc="upper left")
plotInd += 1
#Orientation
plt.figure(plotInd)
plt.errorbar(times, meanMeasures[ind, heteroInd, :], yerr=stdMeasures[ind, heteroInd, :], label="est. hetero")
plt.plot(times, idealMeasures[ind, heteroInd, :], "r", label="hetero")
plt.errorbar(times, meanMeasures[ind, biInd, :], yerr=stdMeasures[ind, biInd, :], label="est. bi")
plt.plot(times, idealMeasures[ind, biInd, :], "k", label="bi")
plt.xlabel("time (days)")
plt.ylabel("detections")
plt.legend(loc="upper left")
plotInd += 1
#Contact tracing rand random detections
plt.figure(plotInd)
if processReal:
plt.errorbar(times, meanMeasures[ind, numDetectsInd, :], color="k", yerr=stdMeasures[ind, numDetectsInd, :], label="est. detections")
plt.plot(times, idealMeasures[ind, numDetectsInd, :], "k--", label="detections")
plt.errorbar(times, meanMeasures[ind, contactDetectInd, :], color="r", yerr=stdMeasures[ind, contactDetectInd, :], label="est. CT detections")
plt.plot(times, idealMeasures[ind, contactDetectInd, :], "r--", label="CT detections")
plt.errorbar(times, meanMeasures[ind, randDetectInd, :], color="b", yerr=stdMeasures[ind, randDetectInd, :], label="est. random detections")
plt.plot(times, idealMeasures[ind, randDetectInd, :], "b--", label="random detections")
plt.xlabel("time (days)")
plt.ylabel("detections")
if not processReal:
lims = plt.xlim()
plt.xlim([0, lims[1]])
plt.legend(loc="upper left")
plt.savefig(outputDir + prefix + "CTRandDetects" + str(ind) + ".eps")
plotInd += 1
#Number of components
plt.figure(plotInd)
plt.errorbar(times, meanMeasures[ind, numCompsInd, :], yerr=stdMeasures[ind, numCompsInd, :])
plt.xlabel("time (days)")
plt.ylabel("num components")
plt.plot(times, idealMeasures[ind, numCompsInd, :], "r")
plotInd += 1
#Max component size
plt.figure(plotInd)
plt.errorbar(times, meanMeasures[ind, maxCompSizeInd, :], yerr=stdMeasures[ind, maxCompSizeInd, :])
plt.xlabel("time (days)")
plt.ylabel("max component size")
plt.plot(times, idealMeasures[ind, maxCompSizeInd, :], "r")
plotInd += 1
#Num edges
plt.figure(plotInd)
plt.errorbar(times, meanMeasures[ind, numEdgesInd, :], yerr=stdMeasures[ind, numEdgesInd, :])
plt.xlabel("time (days)")
plt.ylabel("number of edges")
plt.plot(times, idealMeasures[ind, numEdgesInd, :], "r")
plotInd += 1
#Objectives
plt.figure(plotInd)
plt.errorbar(times[1:], meanMeasures[ind, objsInd, 1:], yerr=stdMeasures[ind, objsInd, 1:])
plt.xlabel("time (days)")
plt.ylabel("objectives")
plotInd += 1
#Degrees
meanDegreeDists = numpy.mean(degreeDists, 2)
stdDegreeDists = numpy.std(degreeDists, 2)
plt.figure(plotInd)
plt.errorbar(numpy.arange(numDegrees), meanDegreeDists[ind, :], yerr=stdDegreeDists[ind, :], color="k")
plt.plot(numpy.arange(numDegrees), idealDegreeDists[ind, :], "k--")
plt.xlabel("degree")
plt.ylabel("frequency")
plotInd += 1
#Print the table of thetas
thetas = numpy.array(thetas)
meanThetas = numpy.mean(thetas, 1)
stdThetas = numpy.std(thetas, 1)
table = Latex.array2DToRows(meanThetas.T, stdThetas.T, precision=4)
rowNames = ["$|\\mathcal{I}_0 |$", "$\\alpha$", "$\\gamma$", "$\\beta$", "$\\lambda$", "$\\sigma$"]
table = Latex.addRowNames(rowNames, table)
print(table)
#Now print the graph properties
idealTable = []
tableMeanArray = []
tableStdArray = []
for ind in inds:
idealTable.append(idealMeasures[ind, :, timeInds])
tableMeanArray.append(meanMeasures[ind, :, timeInds])
tableStdArray.append(stdMeasures[ind, :, timeInds])
idealTable = numpy.vstack(idealTable).T
tableMeanArray = numpy.vstack(tableMeanArray).T
tableStdArray = numpy.vstack(tableStdArray).T
rowNames = ["$|\\mathcal{R}_{t_0}|$.", "male", "female", "hetero", "bi", "RD", "CT", "$|\\mathcal{I}_{t_0}|$", "NC", "LC", "$|E|$", "objs"]
idealTable = Latex.array2DToRows(idealTable, precision=0)
idealTable = Latex.addRowNames(rowNames, idealTable)
print(idealTable)
rowNames = [x + " est." for x in rowNames]
table = Latex.array2DToRows(tableMeanArray, tableStdArray, precision=0)
table = Latex.addRowNames(rowNames, table)
print(table)
#Now print timings
rowNames = [str(x) for x in range(numInds)]
table = Latex.array2DToRows(numpy.mean(timings, 2), numpy.std(timings, 2), precision=1)
table = Latex.addRowNames(rowNames, table)
print(table)
plt.show()
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/cluster/tests/test_affinity_propagation.py | 1 | 1989 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import (assert_equal, assert_array_equal,
assert_raises)
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm
"""
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(S,
preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
| unlicense |
mxjl620/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
haphaeu/yoshimi | PandasDataFrame/dataframe.py | 1 | 3901 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 06 16:03:29 2015
@author: rarossi
"""
import pandas as pd
import numpy as np
from scipy import stats as ss
#
# data = pd.read_table('data.txt')
data = pd.read_table('Results.txt')
#
# don't worry too much about this ugly generator,
# it just emulates the format of my data...
# total = 4500
# data = pd.DataFrame()
# data['Hs'] = np.random.randint(1,4,size=total)
# data['Tp'] = np.random.randint(5,15,size=total)
# data['wd'] = [[165, 180, 195][np.random.randint(0,3)] for _ in xrange(total)]
# data['seed'] = np.random.randint(1,51,size=total)
# data['Tmax'] = np.random.randint(100,250,size=total)
# data['Tmin'] = np.random.randint(10,25,size=total)
# %%
# And here it starts. Would the creators of pandas pull their hair out
# if they saw this?
# Can this be made better?
cols = ['Hs', 'Tp']
cols.extend(data.columns[4:])
stdev = pd.DataFrame(columns=cols)
i = 0
for hs in set(data['Hs']):
data_Hs = data[data['Hs'] == hs]
for tp in set(data_Hs['Tp']):
data_tp = data_Hs[data_Hs['Tp'] == tp]
rw = [hs, tp]
for c in cols[2:]:
if c.lower().find('max') != -1:  # max
# # moment estimators
# mx = max([np.mean(data_tp[data_tp['wd'] == wd][c]) +
# 1.305*np.std(data_tp[data_tp['wd'] == wd][c])
# for wd in set(data_tp['wd'])])
# MLE's
mx = max([ss.gumbel_r(*ss.gumbel_r.fit(
data_tp[data_tp['wd'] == wd][c])).ppf(0.9)
for wd in set(data_tp['wd'])])
else:
# # moment estimators
# mx = min([np.mean(data_tp[data_tp['wd'] == wd][c]) +
# 1.305*np.std(data_tp[data_tp['wd'] == wd][c])
# for wd in set(data_tp['wd'])])
# MLE's
mx = min([ss.gumbel_l(*ss.gumbel_l.fit(
data_tp[data_tp['wd'] == wd][c])).ppf(0.1)
for wd in set(data_tp['wd'])])
rw.extend([mx])
stdev.loc[i] = rw
i += 1
# adjust and sort index
stdev = stdev.sort_index(by=['Hs', 'Tp'])
stdev = stdev.reset_index()
del stdev['index']
# %%
# this works, but how to apply +/- depending on max/min
stdev3 = (1.305 * data.groupby(['Hs', 'Tp', 'wd'])[['Tmax', 'Tmin']].std() +
data.groupby(['Hs', 'Tp', 'wd'])[['Tmax', 'Tmin']].mean()).max(
level=[0, 1]).reset_index()
# %%
# this works for one variable only
# the .max(level=['Hs', 'Tp']) does the magic in getting the worst wave heading
stdev4 = data.groupby(['Hs', 'Tp', 'wd'])['Tmax'].apply(
lambda x: ss.gumbel_r(*ss.gumbel_r.fit(x)).ppf(0.9)).max(
level=['Hs', 'Tp']).reset_index()
# one possibility is to loop, once stdev4 is set, add Tmin with:
stdev4['Tmin'] = data.groupby(['Hs', 'Tp', 'wd'])['Tmin'].apply(
lambda x: ss.gumbel_l(*ss.gumbel_l.fit(x)).ppf(0.1)).min(
level=['Hs', 'Tp']).reset_index()['Tmin']
# %%
stdev['Tmax'].plot()
# stdev3['Tmax'].plot()
stdev4['Tmax'].plot(style='r--')
# %%
'''
# gumbel can be fit and frozen in one line by unpacking (*)
# the returned tuple from fit:
g = gumbel_l(*gumbel_l.fit(data))
# some tips to dataframe
stdev2 = data.groupby(['Hs', 'Tp', 'wd'])[['Tmax', 'Tmin']].std().max(
level=[0, 1]).reset_index()
data.ix[0:4, ['Hs', 'Tp']]
data.ix[[0,10,22], ['Hs', 'Tp']]
data.get_value(0, 'Hs')
gb = data.groupby(['Hs', 'Tp', 'wd'])
def get_max_Tmax(group):
return group.ix[group.Tmax.idxmax()]
data.groupby('Hs').apply(get_max_Tmax)
#apply can have arguments to func
res = data.groupby(['Hs','Tp'])['Tmax'].describe()
'''
# data=data.set_index(['Hs', 'Tp', 'wd'])
# gb = data.groupby(['Hs', 'Tp', 'wd'])
# res = gb.apply(max)
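# A minimal, self-contained sketch of the fit-and-freeze pattern noted above
# (synthetic data; the loc/scale values are arbitrary, for illustration only):
if __name__ == '__main__':
    sample = ss.gumbel_r.rvs(loc=100.0, scale=10.0, size=500)
    frozen = ss.gumbel_r(*ss.gumbel_r.fit(sample))  # fit -> (loc, scale); unpack to freeze
    print(frozen.ppf(0.9))  # 90th-percentile estimate from the fitted distribution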
| lgpl-3.0 |
procoder317/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
Djabbz/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 4 | 39718 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2*ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=1,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether staged predictions eventually give
    # the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
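# Illustrative sketch (not part of the original test suite): using the staged
# prediction API exercised above to pick a number of boosting stages on
# held-out data. The split sizes and estimator settings are arbitrary
# assumptions chosen for demonstration only.
def _example_pick_n_estimators_via_staged_predict():
    X, y = datasets.make_hastie_10_2(n_samples=400, random_state=2)
    X_train, y_train = X[:200], y[:200]
    X_val, y_val = X[200:], y[200:]
    est = GradientBoostingClassifier(n_estimators=50, random_state=0)
    est.fit(X_train, y_train)
    # accuracy on the validation set after each boosting stage
    staged_acc = [np.mean(y_stage == y_val)
                  for y_stage in est.staged_predict(X_val)]
    return int(np.argmax(staged_acc)) + 1  # stages are 1-indexed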
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test that a column-vector y of shape (n_samples, 1) is handled with a warning.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, otherwise the warning gets ignored in the
    # later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    # Test that oob_improvement_ has the correct shape (hard-coded regression test below).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises when subsample=1.0 (no OOB).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every 10 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
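# Illustrative sketch (not part of the original test suite): a monitor based
# on the out-of-bag improvement instead of a fixed iteration count. It assumes
# the estimator was built with ``subsample < 1.0`` so that ``oob_improvement_``
# is populated before the monitor is called; the tolerance value is an
# arbitrary assumption for demonstration.
def oob_tolerance_monitor(i, est, locals, tol=1e-4):
    """Stop boosting once the latest OOB improvement drops below ``tol``."""
    return est.oob_improvement_[i] < tol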
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
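# Illustrative helper (not part of the original test suite): the mapping used
# in the assertion above from a raw exponential-loss decision-function value
# to the predicted probability of the positive class.
def _exponential_score_to_proba(score):
    """Return P(y=1) implied by a decision-function value under 'exponential' loss."""
    return 1.0 / (1.0 + np.exp(-2.0 * score))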
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0, max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
elvandy/nltools | nltools/external/srm.py | 1 | 22395 | #!/usr/bin/env python
# coding: latin-1
""" Shared Response Model (SRM)
===========================
The implementations are based on the following publications:
Chen, P. H. C., Chen, J., Yeshurun, Y., Hasson, U., Haxby, J., & Ramadge,
P. J. (2015). A reduced-dimension fMRI shared response model. In Advances
in Neural Information Processing Systems (pp. 460-468).
Anderson, M. J., Capota, M., Turek, J. S., Zhu, X., Willke, T. L., Wang,
Y., & Norman, K. A. (2016, December). Enabling factor analysis on
thousand-subject neuroimaging datasets. In Big Data (Big Data),
2016 IEEE International Conference on (pp. 1151-1160). IEEE.
Copyright 2016 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Authors: Po-Hsuan Chen (Princeton Neuroscience Institute) and Javier Turek
# (Intel Labs), 2015
from __future__ import division
import logging
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
__all__ = [
"SRM", "DetSRM"
]
logger = logging.getLogger(__name__)
def _init_w_transforms(data, features):
"""Initialize the mappings (Wi) for the SRM with random orthogonal matrices.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
features : int
The number of features in the model.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for each
subject.
voxels : list of int
A list with the number of voxels per subject.
Note
----
This function assumes that the numpy random number generator was
initialized.
Not thread safe.
"""
w = []
subjects = len(data)
voxels = np.empty(subjects, dtype=int)
# Set Wi to a random orthogonal voxels by features matrix
for subject in range(subjects):
voxels[subject] = data[subject].shape[0]
rnd_matrix = np.random.random((voxels[subject], features))
q, r = np.linalg.qr(rnd_matrix)
w.append(q)
return w, voxels
class SRM(BaseEstimator, TransformerMixin):
"""Probabilistic Shared Response Model (SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
sigma_s_ : array, shape=[features, features]
The covariance of the shared response Normal distribution.
mu_ : list of array, element i has shape=[voxels_i]
The voxel means over the samples for each subject.
rho2_ : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The probabilistic Shared Response Model is approximated using the
Expectation Maximization (EM) algorithm proposed in [Chen2015]_. The
implementation follows the optimizations published in [Anderson2016]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2 + K^3))` and the
memory complexity is :math:`O(V T)` with I - the number of iterations,
V - the sum of voxels from all subjects, T - the number of samples, and
K - the number of features (typically, :math:`V \\gg T \\gg K`).
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
mu.append(np.mean(data[subject], 1))
rho2[subject] = 1
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
return x, mu, rho2, trace_xtx
def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
loglikehood : float
The log-likelihood value.
"""
log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
+ np.log(np.diag(chol_sigma_s) ** 2).sum())
loglikehood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
loglikehood += 0.5 * np.trace(
wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
# + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
return loglikehood
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
samples = data[0].shape[1]
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
        # Main loop of the EM algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
(chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor(
sigma_s_rhos, check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
return sigma_s, w, mu, rho2, shared_response
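# Illustrative usage sketch (not part of the original module): fitting the
# probabilistic SRM above on synthetic multi-subject data. The subject,
# voxel and sample counts are arbitrary assumptions for demonstration.
def _example_probabilistic_srm():
    rng = np.random.RandomState(0)
    # three synthetic "subjects" with different voxel counts, same samples
    data = [rng.randn(voxels, 40) for voxels in (30, 25, 35)]
    model = SRM(n_iter=5, features=10, rand_seed=0)
    model.fit(data)
    # each transformed subject has shape (features, samples)
    return model.transform(data)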
class DetSRM(BaseEstimator, TransformerMixin):
"""Deterministic Shared Response Model (DetSRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The Deterministic Shared Response Model is approximated using the
Block Coordinate Descent (BCD) algorithm proposed in [Chen2015]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2))` and the memory
complexity is :math:`O(V T)` with I - the number of iterations, V - the
sum of voxels from all subjects, T - the number of samples, K - the
number of features (typically, :math:`V \\gg T \\gg K`), and N - the
number of subjects.
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform data to the Shared Response subspace
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject.
y : not used
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
return objective * 0.5 / data[0].shape[1]
def _compute_shared_response(self, data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response
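# Illustrative usage sketch (not part of the original module): the
# deterministic variant run on the same kind of synthetic data as the
# probabilistic example above. Sizes are arbitrary assumptions.
def _example_deterministic_srm():
    rng = np.random.RandomState(1)
    data = [rng.randn(voxels, 40) for voxels in (30, 25, 35)]
    model = DetSRM(n_iter=5, features=10, rand_seed=0)
    model.fit(data)
    return model.s_  # shared response, shape (features, samples)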
| mit |
sonnyhu/scikit-learn | sklearn/tests/test_pipeline.py | 23 | 15392 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA(svd_solver='full')
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
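# Illustrative sketch (not part of the original test suite): composing a
# FeatureUnion inside a Pipeline with estimators already imported above.
# The hyper-parameter values are arbitrary assumptions for demonstration.
def _example_union_inside_pipeline():
    iris = load_iris()
    union = make_union(PCA(n_components=2, svd_solver='full'),
                       SelectKBest(k=1))
    model = make_pipeline(union, LogisticRegression())
    model.fit(iris.data, iris.target)
    return model.score(iris.data, iris.target)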
| bsd-3-clause |
dnjohnstone/hyperspy | hyperspy/drawing/_widgets/scalebar.py | 4 | 5335 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.misc.math_tools import closest_nice_number
class ScaleBar(object):
def __init__(self, ax, units, pixel_size=None, color='white',
position=None, max_size_ratio=0.25, lw=2, length=None,
animated=False):
"""Add a scale bar to an image.
Parameters
----------
ax : matplotlib axes
The axes where to draw the scale bar.
units : str
pixel_size : {None, float}
If None the axes of the image are supposed to be calibrated.
Otherwise the pixel size must be specified.
color : a valid matplotlib color
        position : {None, (float, float)}
If None the position is automatically determined.
max_size_ratio : float
The maximum size of the scale bar in respect to the
length of the x axis
lw : int
The line width
length : {None, float}
If None the length is automatically calculated using the
max_size_ratio.
"""
self.animated = animated
self.ax = ax
self.units = units
self.pixel_size = pixel_size
self.xmin, self.xmax = ax.get_xlim()
self.ymin, self.ymax = ax.get_ylim()
self.text = None
self.line = None
self.tex_bold = False
if length is None:
self.calculate_size(max_size_ratio=max_size_ratio)
else:
self.length = length
if position is None:
self.position = self.calculate_line_position()
else:
self.position = position
self.calculate_text_position()
self.plot_scale(line_width=lw)
self.set_color(color)
def get_units_string(self):
if self.tex_bold is True:
            if self.units[0] == '$' and self.units[-1] == '$':
return r'$\mathbf{%g\,%s}$' % \
(self.length, self.units[1:-1])
else:
return r'$\mathbf{%g\,}$\textbf{%s}' % \
(self.length, self.units)
else:
return r'$%g\,$%s' % (self.length, self.units)
def calculate_line_position(self, pad=0.05):
return ((1 - pad) * self.xmin + pad * self.xmax,
(1 - pad) * self.ymin + pad * self.ymax)
def calculate_text_position(self, pad=1 / 100.):
ps = self.pixel_size if self.pixel_size is not None else 1
x1, y1 = self.position
x2, y2 = x1 + self.length / ps, y1
self.text_position = ((x1 + x2) / 2.,
y2 + (self.ymax - self.ymin) / ps * pad)
def calculate_size(self, max_size_ratio=0.25):
ps = self.pixel_size if self.pixel_size is not None else 1
size = closest_nice_number(ps * (self.xmax - self.xmin) *
max_size_ratio)
self.length = size
def remove(self):
if self.line is not None:
self.ax.lines.remove(self.line)
if self.text is not None:
self.ax.texts.remove(self.text)
def plot_scale(self, line_width=1):
self.remove()
ps = self.pixel_size if self.pixel_size is not None else 1
x1, y1 = self.position
x2, y2 = x1 + self.length / ps, y1
self.line, = self.ax.plot([x1, x2], [y1, y2],
linestyle='-',
lw=line_width,
animated=self.animated)
self.text = self.ax.text(*self.text_position,
s=self.get_units_string(),
ha='center',
size='medium',
animated=self.animated)
self.ax.set_xlim(self.xmin, self.xmax)
self.ax.set_ylim(self.ymin, self.ymax)
self.ax.figure.canvas.draw_idle()
def _set_position(self, x, y):
self.position = x, y
self.calculate_text_position()
self.plot_scale(line_width=self.line.get_linewidth())
def set_color(self, c):
self.line.set_color(c)
self.text.set_color(c)
self.ax.figure.canvas.draw_idle()
def set_length(self, length):
color = self.line.get_color()
self.length = length
        self.calculate_size()
self.calculate_text_position()
self.plot_scale(line_width=self.line.get_linewidth())
self.set_color(color)
def set_tex_bold(self):
self.tex_bold = True
self.text.set_text(self.get_units_string())
self.ax.figure.canvas.draw_idle()
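# Illustrative usage sketch (not part of the original module): attaching a
# ScaleBar to a matplotlib image axes. The image, pixel size and units are
# arbitrary assumptions for demonstration.
def _example_scale_bar():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.imshow(np.random.random((64, 64)), cmap='gray')
    scale_bar = ScaleBar(ax, units='nm', pixel_size=2.5, color='yellow', lw=3)
    return fig, scale_bar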
| gpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/discriminant_analysis.py | 7 | 27513 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from .utils import deprecated
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string, int or float type')
return s
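# A minimal sketch, not part of the original file: the three shrinkage modes
# accepted by _cov above, applied to an assumed random 20x3 sample.
def _demo_cov_shrinkage():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 3)
    s_empirical = _cov(X_demo, shrinkage=None)  # plain empirical covariance
    s_ledoit = _cov(X_demo, shrinkage='auto')   # Ledoit-Wolf, rescaled back
    s_fixed = _cov(X_demo, shrinkage=0.5)       # fixed blend towards the scaled identity
    return s_empirical.shape, s_ledoit.shape, s_fixed.shape  # each is (3, 3)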
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False), used
only in 'svd' solver.
.. versionadded:: 0.17
tol : float, optional, (default 1.0e-4)
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.linalg.norm(evecs, axis=0)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
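# A minimal sketch, not part of the original file, complementing the solver
# notes above: 'lsqr' and 'eigen' accept shrinkage, while 'svd' and 'eigen'
# also support transform(). The toy data is an illustrative assumption.
def _demo_lda_solvers():
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
    reducer = LinearDiscriminantAnalysis(solver='eigen', n_components=1).fit(X, y)
    return clf.predict([[-0.8, -1]]), reducer.transform(X).shape  # shape (6, 1)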
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
store_covariance : boolean
If True the covariance matrices are computed and stored in the
`self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariance=False,
store_covariances=None, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4, store_covariances=None):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.store_covariance = store_covariance
self.tol = tol
@property
@deprecated("Attribute covariances_ was deprecated in version"
" 0.19 and will be removed in 0.21. Use "
"covariance_ instead")
def covariances_(self):
return self.covariance_
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance or self.store_covariances
if self.store_covariances:
warnings.warn("'store_covariances' was renamed to store_covariance"
" in version 0.19 and will be removed in 0.21.",
DeprecationWarning)
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
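# A minimal sketch, not part of the original file: the two-class special case
# of decision_function documented above returns a 1-D array of log likelihood
# ratios. The toy data is an illustrative assumption.
def _demo_qda_two_class():
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=float)
    y = np.array([1, 1, 1, 2, 2, 2])
    qda = QuadraticDiscriminantAnalysis().fit(X, y)
    ratios = qda.decision_function(X)          # shape (6,) rather than (6, 2)
    probas = qda.predict_proba([[-0.8, -1]])   # each row sums to 1
    return ratios.shape, probas.shape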
| mit |
ChanderG/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
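# A minimal sketch, not part of the original file: the Lagrange polynomial
# through three points of y = x**2 recovers the quadratic exactly. The sample
# points are illustrative assumptions.
def _demo_lagrange():
    x = np.array([0.0, 1.0, 2.0])
    w = x ** 2
    p = lagrange(x, w)   # a numpy.poly1d of degree 2
    return p(3.0)        # ~9.0, since the interpolant is x**2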
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
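# A minimal sketch, not part of the original file: __call__ above can also
# evaluate spline partial derivatives via dx/dy. The small grid and the cubic
# kind are illustrative assumptions.
def _demo_interp2d_derivative():
    x = np.linspace(0.0, 4.0, 13)
    y = np.linspace(0.0, 4.0, 13)
    xx, yy = np.meshgrid(x, y)
    z = np.sin(xx) * np.cos(yy)
    f = interp2d(x, y, z, kind='cubic')
    return f(1.0, 2.0), f(1.0, 2.0, dx=1)   # value and d/dx estimate at (1, 2)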
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0,'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
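# A minimal sketch, not part of the original file, complementing the interp1d
# docstring: spline kinds and out-of-bounds handling via bounds_error and
# fill_value. The sample data is an illustrative assumption.
def _demo_interp1d_kinds():
    x = np.arange(0, 10)
    y = np.exp(-x / 3.0)
    f_nearest = interp1d(x, y, kind='nearest')
    f_cubic = interp1d(x, y, kind='cubic')
    f_filled = interp1d(x, y, bounds_error=False, fill_value=-1.0)
    return f_nearest(2.4), f_cubic(2.4), f_filled(15.0)   # the last value is -1.0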
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
        The antiderivative is also the indefinite integral of the function,
        and differentiation is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
            Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
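# A minimal sketch, not part of the original file: the local power basis
# documented in the PPoly docstring, on a two-interval quadratic. The
# coefficients are illustrative assumptions.
def _demo_ppoly():
    # on [0, 1): x**2; on [1, 2]: -(x - 1)**2 + 2*(x - 1) + 1
    c = np.array([[1.0, -1.0],
                  [0.0, 2.0],
                  [0.0, 1.0]])
    p = PPoly(c, [0.0, 1.0, 2.0])
    return p(0.5), p.derivative()(1.5), p.integrate(0, 2)   # 0.25, 1.0, 2.0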
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k + nu representing the
antiderivative of this polynomial.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several properties of Bernstein basis functions.
        First of all,
        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
        If B(x) is a linear combination of the form
        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
        Iterating this relation, one finds for the q-th derivative
        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
        with
        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a = 0` contributes to :math:`B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.
        At `x = xb` the same holds with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
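    # A minimal usage sketch: with values only (no derivatives), ya = [0] and
    # yb = [1] on [0, 1] give the Bernstein coefficients of the linear
    # interpolant:
    #     BPoly._construct_from_derivatives(0, 1, [0], [1])   # -> array([0., 1.])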
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
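    # A minimal usage sketch: degree raising changes only the representation,
    # not the polynomial; the constant c = [1.] written in the degree-1 basis
    # becomes [1., 1.] because b_{0,1} + b_{1,1} = 1:
    #     BPoly._raise_degree(np.array([1.]), 1)   # -> array([1., 1.])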
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
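# A minimal usage sketch for the class above: on the 1-D grid [0, 1, 2] with
# values [0, 1, 4], "linear" interpolates between the two neighbouring grid
# points while "nearest" snaps to the closest one:
#     rgi = RegularGridInterpolator(([0., 1., 2.],), np.array([0., 1., 4.]))
#     rgi([[1.5]])                       # -> array([2.5])
#     rgi([[1.4]], method="nearest")     # -> array([1.])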
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
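# A minimal usage sketch: for method="linear" on a 2-D grid, interpn reduces to
# bilinear interpolation; with grid values of f(x, y) = x + y,
#     x = y = np.array([0., 1.])
#     vals = np.array([[0., 1.], [1., 2.]])
#     interpn((x, y), vals, [[0.5, 0.5]])    # -> array([1.])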
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
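# A minimal usage sketch: _setdiag writes the vector v along the k-th diagonal
# of a 2-D array in place, e.g.
#     a = np.zeros((3, 3))
#     _setdiag(a, 1, [5, 5])    # fills positions (0, 1) and (1, 2)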
# Return the spline that minimizes the dis-continuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
        Extra conditions for the spline construction; interpretation depends on `kind`.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
        Spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Deriv
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Spline conditions, passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
sunericd/ISTools | GeneTK/mods/gel_visualizer.py | 1 | 8025 | # 07/09/2017 Modified to take a list of sequences, change window sizes to accommodate more gels, and label columns.
import pandas as pd
from graphics import *
from math import *
from PIL import Image as NewImage
#'Runs' the plasmid_seqs through a 'gel' with restriction enzymes from re_list. restriction_sites is a database of restriction enzymes.
def gel_visualize(plasmid_seqs, re_list, restriction_sites):
max_lengths, lengths_list = digestSeq(plasmid_seqs, re_list, restriction_sites)
#print (max_lengths)
#print (lengths_list)
if max(max_lengths) > 1000:
#print("big")
bigDraw(lengths_list)
else:
#print("Small")
smallDraw(lengths_list)
def digestSeq (plasmid_seqs, re_list,restriction_sites):
plasmid_seqs = plasmid_seqs.split()
# Changes all to upper case (so user can input case insensitive)
plasmid_seqs_upper = []
for plasmid_seq in plasmid_seqs:
plasmid_seqs_upper.append(plasmid_seq.upper())
plasmid_seqs = plasmid_seqs_upper
# ERROR HANDLING
# 30 well maximum
if len(plasmid_seqs) > 30:
raise UserWarning(
'You have exceeded the 30 well maximum. Also, check to ensure that the only spaces are between sequences'
)
for plasmid_seq in plasmid_seqs:
# Max ladder is 3KB
if len(plasmid_seq) > 3000:
raise UserWarning(
'Our gel cannot handle sequences larger than 3KB!'
)
# Only capital A,T,C,G
for nuc in plasmid_seq:
if nuc not in "ATCG":
raise UserWarning(
'Sequences must only contain A, T, C, or G!'
)
# Parsing biolabs restriction sites
restriction_enzymes = restriction_sites.values.T.tolist()
restriction_enzymes = restriction_enzymes[0]
restriction_sites = restriction_sites.values.tolist()
restriction_sites = restriction_sites[1:]
# Cleaning NaNs
restriction_seqs = []
for site in restriction_sites:
clean_site = [x for x in site if str(x) != 'nan']
del clean_site[0]
restriction_seqs.append(clean_site)
del restriction_enzymes[0]
# Matching restriction enzyme names ERROR
for re in re_list:
if re not in restriction_enzymes:
raise UserWarning(
str(re) + ' is not in our list. Please check to make sure that the enzyme name matches the names in our Enzyme List.'
)
##### Gel Visualization ######
lengths_list = []
max_lengths = []
# Finding cut indicies
for plasmid_seq in plasmid_seqs:
cut_idxs = []
for re in re_list:
re_index = restriction_enzymes.index(re)
seq_index = 0
for cut_site in restriction_seqs[re_index]:
site_idx = plasmid_seq.find(restriction_seqs[re_index][seq_index])
if site_idx != -1:
cut_idxs.append(site_idx)
while site_idx < len(plasmid_seq):
site_idx = plasmid_seq.find(restriction_seqs[re_index][seq_index], site_idx)
if site_idx == -1:
break
cut_idxs.append(site_idx)
site_idx += len(restriction_seqs[re_index][seq_index])
seq_index += 1
cut_idxs.sort()
# print(cut_idxs)
cut_idxs.append(len(plasmid_seq))
lengths = []
start_idx = 0
# Collecting fragment lengths
for idx in cut_idxs:
frag = plasmid_seq[start_idx:idx]
lengths.append(len(frag))
start_idx = idx
# Since plasmids are circular
if len(lengths) >= 2:
connected_length = lengths[0] + lengths[-1]
del lengths [0]
del lengths [-1]
lengths.append(connected_length)
# Cleaning lengths
lengths = [x for x in lengths if str(x) != '0']
lengths_list.append(lengths)
max_lengths.append(max(lengths))
#print ('Fragment Lengths:')
#print (lengths)
return (max_lengths, lengths_list)
#Small Ladder
def smallDraw (lengths):
numSeq=len(lengths)
if numSeq>2:
x=150*0.2*(numSeq+3)
else:
x=150
win=GraphWin('Gel',x,300)
xWin=win.getWidth()
yWin=win.getHeight()
standardX=150*0.3
standardY=yWin*0.1
lenBand=150*0.15
scale=1.35
listBText=['1.0 kb','0.8 kb', '0.6 kb', '0.5 kb', '0.4 kb', '0.3 kb', '0.2 kb','0.15 kb','0.1 kb','0.075 kb' , '0.05 kb']
listBand=[float(1000),float(800),float(600),float(500),float(400),float(300),float(200),float(150),float(100),float(75),float(50)]
listY=[]
for i in listBand:
listY.append(float(-1/0.098*(log10(i)-3.8494)))
largest=max(listY)
for i in range(len(listY)):
listY[i]=float(listY[i])/largest*yWin
for i in range(len(listY)):
yVal=scale*(listY[i]-min(listY)+15)
pt=Point(standardX,yVal)
l=Line(Point(standardX+lenBand,yVal),pt)
kblabel=Text(Point((standardX)/2,yVal),listBText[i])
kblabel.setSize(7)
l.draw(win)
kblabel.draw(win)
stdlabel=Text(Point(standardX+0.5*lenBand,yWin*0.02),'ladder')
stdlabel.setSize(7)
stdlabel.draw(win)
for i in range(numSeq):
testX=standardX+150*0.2*(i+1)
testY=yWin*0.1
numlabel=Text(Point(testX+0.5*lenBand,yWin*0.02),(i+1))
numlabel.setSize(7)
numlabel.draw(win)
listTY=[]
for j in lengths[i]:
listTY.append(float(-1/0.098*(log10(j)-3.8494)))
for j in range(len(listTY)):
listTY[j]=float(listTY[j])/largest*yWin
for j in range(len(listTY)):
yVal=scale*(listTY[j]-min(listY)+15)
pt=Point(testX,yVal)
l=Line(Point(testX+lenBand,yVal),pt)
l.draw(win)
# Saving File
# win.postscript(file="Gel.eps", colormode='color')
# img = NewImage.open("Gel.eps")
# PNG_Title = "Gel.png"
# img.save(PNG_Title, "png")
#Large Ladder
def bigDraw (lengths):
numSeq=len(lengths)
if numSeq>2:
x=150*0.2*(numSeq+3)
else:
x=150
win=GraphWin('Gel',x,250)
xWin=win.getWidth()
yWin=win.getHeight()
standardX=150*0.3
standardY=yWin*0.1
lenBand=150*0.15
scale=1.1
listBText=['3.0 kb','2.0 kb', '1.5 kb', '1.0 kb', '0.8 kb', '0.6 kb', '0.5 kb','0.4 kb','0.3 kb','0.2 kb','0.15 kb' , '0.1 kb']
listBand=[float(3000),float(2000),float(1500),float(1000),float(800),float(600),float(500),float(400),float(300),float(200),float(150), float(100)]
listY=[]
for i in listBand:
listY.append(float(-1/0.098*(log10(i)-3.8494)))
largest=max(listY)
for i in range(len(listY)):
listY[i]=float(listY[i])/largest*yWin
for i in range(len(listY)):
yVal=scale*(listY[i]-min(listY)+15)
pt=Point(standardX,yVal)
l=Line(Point(standardX+lenBand,yVal),pt)
kblabel=Text(Point((standardX)/2,yVal),listBText[i])
l.draw(win)
kblabel.draw(win)
kblabel.setSize(7)
stdlabel=Text(Point(standardX+0.5*lenBand,yWin*0.02),'ladder')
stdlabel.setSize(7)
stdlabel.draw(win)
for i in range(numSeq):
testX=standardX+150*0.2*(i+1)
testY=yWin*0.1
numlabel=Text(Point(testX+0.5*lenBand,yWin*0.02),(i+1))
numlabel.setSize(7)
numlabel.draw(win)
listTY=[]
for j in lengths[i]:
listTY.append(float(-1/0.098*(log10(j)-3.8494)))
for j in range(len(listTY)):
listTY[j]=float(listTY[j])/largest*yWin
for j in range(len(listTY)):
yVal=scale*(listTY[j]-min(listY)+15)
pt=Point(testX,yVal)
l=Line(Point(testX+lenBand,yVal),pt)
l.draw(win)
| mit |
mxjl620/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
kmiddleton/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
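# A minimal usage sketch (same data as in main() below): the observed
# difference is |140 - 110| = 30 out of 250 tosses, and the simulated p-value
# comes out roughly 0.06-0.07 (it varies from run to run):
#     ct = CoinTest((140, 110))
#     ct.PValue()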
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
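# A minimal usage sketch (hypothetical data): the test statistic is the
# absolute difference in group means, and RunModel shuffles the pooled values
# to simulate the null hypothesis:
#     group1 = np.random.normal(0.0, 1.0, 100)
#     group2 = np.random.normal(0.3, 1.0, 100)
#     ht = DiffMeansPermute((group1, group2))
#     p_value = ht.PValue(iters=1000)    # small p suggests a real difference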
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| gpl-3.0 |
ananthamurthy/eyeBlinkBehaviour | analysis/old/analyze_dir.py | 1 | 5444 | #!/usr/bin/env python
"""analyze_dir.py:
Analyze a given directory. All trials are accumulated and plotted.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh "
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
import sys
import numpy as np
import dateutil
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
from collections import defaultdict
import logging
import re
import analyze_trial as at
import matplotlib.cm as cm
from scipy.interpolate import interp1d
#matplotlib.rcParams.update( {'font.size' : 10} )
args_ = None
csplus, csminus = [], []
csplusIdx, csminusIdx = [], []
distraction = []
distractionIdx = []
probes = []
probesIdx = []
# This is the valid trial time.
min_trial_time_ = 17500 # ms
def plot_subplot( ax, data, idx, tvecs, title ):
assert len( data ) > 2
assert len(tvecs) == len(data)
# Dimention mismatch, use histogram2d
newImg = []
for i, tvec in enumerate( tvecs ):
t = tvec.values
d = data[i].values
# Nearest gives the best fit.
f = interp1d( t, d, kind='nearest' )
tnew = np.arange( 200, min_trial_time_, 5 )
dnew = f( tnew )
meanErr = abs(np.mean( dnew ) - np.mean( d ))
stdErr = abs(np.std( dnew ) - np.std( d ))
if meanErr > 1.0:
print( '[Warn] Got error in mean after resampling %f' % meanErr )
if stdErr > 2.0:
print( '[WARN] Got error in std %f' % stdErr )
newImg.append( dnew )
plt.imshow( np.vstack(newImg), cmap = "jet"
, interpolation = 'none', aspect='auto'
)
# ax.set_xticks( range(0,len(idx),2), idx[::2] )
ax.set_xlabel( 'Time (ms)' )
ax.set_ylabel( '# Trial' )
ax.set_title( title )
ax.legend( )
def main( ):
global args_
if not args_.output_dir:
args_.output_dir = os.path.join(args_.dir, '_plots')
if not os.path.isdir( args_.output_dir):
os.makedirs( args_.output_dir )
files = {}
print( '[INFO] Searching in %s' % args_.dir )
for d, sd, fs in os.walk( args_.dir ):
for f in fs:
ext = f.split('.')[-1]
if ext == 'dat':
filepath = os.path.join(d, f)
trialIndex = re.search( r'trial=(?P<index>\d+)\.%s'%ext, filepath)
if trialIndex:
index = int(trialIndex.groupdict()['index'])
files[index] = (filepath, f)
# Sort the trials according to trial number
fileIdx = sorted( files )
if len(fileIdx) == 0:
print('[WARN] No files found' )
quit()
cstvecs, probetvecs = [], []
for idx in fileIdx:
f, fname = files[idx]
result = None
try:
result = at.main( { 'input' : f
, 'output' : os.path.join(args_.output_dir, fname+'.png')
, 'plot' : False }
)
except Exception as e:
result = None
print( '[WARN] Failed to process file %s' % f )
if not result:
continue
tVec = result['time']
if tVec.max() < min_trial_time_:
print( '[WARN] Ignoring file %s' % fname )
continue
row = result['sensor']
if len(row) > 100:
r = row
if idx % 10 == 0:
probetvecs.append( tVec )
probes.append( r )
probesIdx.append( idx )
else:
csplus.append( r )
csplusIdx.append( idx )
cstvecs.append( tVec )
if csplus:
ax = plt.subplot(2, 1, 1)
print( 'Plotting csplus' )
plot_subplot( ax, csplus, csplusIdx, cstvecs, 'CS+' )
plt.colorbar()
if probes:
ax = plt.subplot(2, 1, 2)
print( 'Plotting probes' )
plot_subplot( ax, probes, probesIdx, probetvecs, 'PROBES' )
plt.colorbar( )
outfile = '%s/summary.png' % args_.output_dir
print('[INFO] Saving file to %s' % outfile )
plt.suptitle( args_.dir.split('/')[-1].replace('_', ', ')
, horizontalalignment = 'left'
, fontsize = 9 )
plt.savefig( outfile )
if __name__ == '__main__':
import argparse
# Argument parser.
description = '''Summaries data in one directory'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--dir', '-d'
, required = True
, help = 'Directory to seach for behaviour data '
)
parser.add_argument('--max', '-m'
, required = False
, default = -1
, help = 'Max number of trials to be plotted. Default all'
)
parser.add_argument('--subplots', '-s'
, action = 'store_true'
, help = 'Each trial in subplot.'
)
parser.add_argument('--output_dir', '-o'
, required = False
, default = ''
, help = 'Directory to save results.'
)
parser.add_argument('--analysis', '-a'
, required = False
, default = 'heatmap'
, help = 'heatmap, default = heatmap'
)
class Args: pass
args_ = Args()
parser.parse_args(namespace=args_)
main( )
| gpl-3.0 |
shahankhatch/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
HasanIssa88/EMG_Classification | Plot_EMG.py | 1 | 3400 |
import os
import glob
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
def plot_EMG(file_name):
df=pd.read_csv(file_name,sep=',',skiprows=13,header=None,na_values="null",delimiter=',')
df.columns=['Force_sensor','EMG_radial_1','EMG_radial_2','EMG_radial_3','EMG_radial_4',
'EMG_radial_5','EMG_radial_6','EMG_special_1','EMG_special_2','EMG_special_3','EMG_special_4']
# first sensor Configuration
Force_sensor=df['Force_sensor']
EMG_radial_1=df['EMG_radial_1']
EMG_radial_2=df['EMG_radial_2']
EMG_radial_3=df['EMG_radial_3']
EMG_radial_4=df['EMG_radial_4']
EMG_radial_5=df['EMG_radial_5']
EMG_radial_6=df['EMG_radial_6']
# second sensor Configuration
EMG_special_1=df['EMG_special_1']
EMG_special_2=df['EMG_special_2']
EMG_special_3=df['EMG_special_3']
EMG_special_4=df['EMG_special_4']
# time vectors
Sampl_Freq=1000
Sampl_Time=1/Sampl_Freq
time_data_force =np.linspace(0,len(Force_sensor),num=len(Force_sensor))
time_data_radial=np.linspace(0,len(EMG_radial_1),num=len(EMG_radial_1))
time_data_special=np.linspace(0,len(EMG_special_1),num=len(EMG_special_1))
# plot graphs
f,(ax1,ax2,ax3,ax4,ax5,ax6,ax7)=plt.subplots(7,sharex=True)
ax1.set_title('Raw data, Force and EMG configuration 1')
ax1.plot(time_data_force, Force_sensor,'r')
ax1.set_ylim(-10,10)
ax1.set_ylabel('Force(Kg)')
#ax1.set_yticklabels([])
ax2.plot(time_data_radial, EMG_radial_1,)
ax2.set_ylim(-0.1,0.1)
ax2.set_yticklabels([])
ax2.set_ylabel('Ch1[mV]')
ax3.plot(time_data_radial,EMG_radial_2)
ax3.set_ylim(-0.5,0.5)
ax3.set_yticklabels([])
ax3.set_ylabel('Ch2[mV]')
ax4.plot(time_data_radial,EMG_radial_3)
ax4.set_ylim(-0.5,0.5)
ax4.set_yticklabels([])
ax4.set_ylabel('Ch3[mV]')
ax5.plot(time_data_radial,EMG_radial_4)
ax5.set_ylim(-0.5,0.5)
ax5.set_yticklabels([])
ax5.set_ylabel('Ch4[mV]')
ax6.plot(time_data_radial,EMG_radial_5)
ax6.set_ylim(-1,1)
ax6.set_yticklabels([])
ax6.set_ylabel('Ch5[mV]')
ax7.plot(time_data_radial,EMG_radial_6)
ax7.set_ylim(-1,1)
ax7.set_yticklabels([])
ax7.set_ylabel('Ch6[mV]')
ax7.set_xlabel('Time[ms]')
plt.hold()
f,(ax1,ax2,ax3,ax4,ax5)=plt.subplots(5,sharex=True)
ax1.plot(time_data_force, Force_sensor,'r')
ax1.set_ylim(-10,10)
ax1.set_ylabel('Force(Kg)')
#ax1.set_yticklabels([])
ax1.set_xticklabels([])
ax1.set_title('Raw data, Force and EMG configuration 2')
ax2.plot(time_data_special, EMG_special_1,)
ax2.set_ylim(-1,1)
ax2.set_yticklabels([])
ax2.set_ylabel('Ch7[mV]')
ax2.set_xticklabels([])
ax3.plot(time_data_special, EMG_special_2,)
ax3.set_ylim(-1,1)
ax3.set_yticklabels([])
ax3.set_ylabel('Ch8[mV]')
ax4.plot(time_data_special, EMG_special_3,)
ax4.set_ylim(-0.2,0.2)
ax4.set_yticklabels([])
ax4.set_ylabel('Ch9[mV]')
ax4.set_xticklabels([])
ax5.plot(time_data_special, EMG_special_4,)
ax5.set_ylim(-0.2,0.2)
ax5.set_yticklabels([])
ax5.set_ylabel('Ch10[mV]')
ax5.set_xlabel('Time[ms]')
plt.show()
| gpl-3.0 |
nkhuyu/office-nfl-pool | transform.py | 6 | 4239 | """
transform
~~~~~~~~~
Helper functions for data manipulation using Pandas.
"""
import numpy as np
import pandas as pd
def from_byteam_to_bygame(df, augment=True, dont_mirror=[]):
"""Tranform data with one row per team to one row per game.
In the 'byteam' format, there is one row per team -- one for the
Home team and one for the Away team. In the 'bygame' format, there
is one row per game, plus one row per team per bye week.
If 'augment' is True, then prefix every column not in
'dont_mirror' with an 'A_' for 'Away' or an 'H_' for 'Home'
to make separate columns for each team.
"""
common_cols = ['Season', 'Week', 'Home', 'Away']
home = df[df.AtHome]
del home['AtHome']
if not augment:
return home
# Otherwise augment the stats so that each column in the
# dataset is prefixed with an 'H' for the home team or 'A' for
# the away team.
away = df[df.AtHome == False]
del away['AtHome']
new_home_colnames = []
new_away_colnames = []
for c in home.columns:
if c in common_cols:
new_home_colnames.append(c)
new_away_colnames.append(c)
elif c == 'Team':
new_home_colnames.append('Home')
new_away_colnames.append('Away')
elif c == 'Opponent':
new_home_colnames.append('Away')
new_away_colnames.append('Home')
elif c in dont_mirror:
new_home_colnames.append(c)
del away[c]
else:
new_home_colnames.append('H_' + c)
new_away_colnames.append('A_' + c)
home.columns = new_home_colnames
away.columns = new_away_colnames
# Left join because the 'bye' weeks are designated 'AtHome'
return home.merge(away, on=common_cols, how='left')
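# Added illustration (not part of the original module): a minimal sketch of the
# byteam -> bygame reshaping above.  The teams and scores are invented, and
# 'Points' is just an example of a stat column that gets mirrored.
def _example_from_byteam_to_bygame():
    byteam = pd.DataFrame({'Season': [2014, 2014],
                           'Week': [1, 1],
                           'Team': ['SEA', 'GB'],
                           'Opponent': ['GB', 'SEA'],
                           'AtHome': [True, False],
                           'Points': [36, 16]})
    # Expected: a single row with Home='SEA', Away='GB', H_Points=36, A_Points=16.
    return from_byteam_to_bygame(byteam)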
def add_derived_columns(df):
"""Add 'Spread', 'WinPct', and a flag 'LastWkBye'
Make your own here if you like.
"""
# Spread
df['Spread'] = df.Points - df.PointsAllowed
# WinPct
# Cumulative win percent to date in a season.
df['WinPct'] = df.groupby(('Team','Season'))['Spread'].apply(
lambda s: ( 0.5 * (s == 0).cumsum() + (s > 0).cumsum() )
/ s.notnull().cumsum() )
# LastWkBye
# True if last week's game was a Bye.
# This requires grouping by Team and Season.
# Be sure to sort first.
df.sort(['Team', 'Season','Week'], inplace=True)
df['LastWkBye'] = df.groupby(['Team','Season'])['Spread'
].shift(1).isnull().fillna(False)
# The first week is null because it was shifted; make it False.
df.ix[df.Week == 1, 'LastWkBye'] = False
def add_rolling_mean(df, cols, prefix='m{window}wk_', window=5, min_periods=2):
"""Add a rolling mean to each column in 'cols'.
Prefix the new columns with the stated prefix.
Sort the data by Team, Season, and Week, and group by Team
when calculating the moving averages, so the averages
will cross over seasons.
"""
df.sort(['Team', 'Season', 'Week'], inplace=True)
for c in cols:
colname = prefix.format(window=window) + c
df[colname] = df.groupby('Team')[c].apply(
pd.rolling_mean, window=window, min_periods=min_periods)
def add_ewma(df, cols, prefix='ewma{center}wk_', center=2):
"""Add an exponentially weighted moving average to columns in 'cols'.
Prefix the new columns with the stated prefix.
Sort the data by Team, Season, and Week, and group by Team
when calculating the moving averages, so the averages
will cross over seasons.
"""
df.sort(['Team', 'Season', 'Week'], inplace=True)
for c in cols:
colname = prefix.format(center=center) + c
df[colname] = df.groupby('Team')[c].apply(
pd.ewma, center)
def add_lag(df, cols, prefix='lag{lag}_', lag=1):
"""Add the stated lag to each column in 'cols'.
Prefix the new columns with the stated prefix.
Sort the data by Team, Season, and Week, and group by Team,
so the lags will cross over seasons.
"""
df.sort(['Team', 'Season', 'Week'], inplace=True)
for c in cols:
colname = prefix.format(lag=lag) + c
df[colname] = df.groupby('Team')[c].shift(lag)
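# Added illustration (not in the original file): a hedged end-to-end sketch of
# the helpers above on a tiny, invented 'byteam' frame.  It assumes the
# Team/Season/Week/Points/PointsAllowed column names used throughout this
# module and targets the same (older) pandas API level as the helpers
# themselves (DataFrame.sort, pd.rolling_mean, .ix).
def _example_feature_pipeline():
    byteam = pd.DataFrame({'Team': ['SEA'] * 4,
                           'Season': [2014] * 4,
                           'Week': [1, 2, 3, 4],
                           'Points': [36, 21, 26, 27],
                           'PointsAllowed': [16, 30, 20, 17]})
    add_derived_columns(byteam)                # adds Spread, WinPct, LastWkBye
    add_rolling_mean(byteam, ['Spread'], window=2, min_periods=1)
    add_lag(byteam, ['Spread'], lag=1)
    return byteam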
| mit |
dcc-ufrj/SimuQoS | classes/model/GraficoClass.py | 1 | 25445 |
# -*- coding: utf-8 -*-
'''
Created on 02/11/2012
@author: diogo
'''
from MatematicaClass import MatematicaClass
'''
from series import Series
import cairo
import cairoplot
'''
import os
import glob
import matplotlib.pyplot as plt
import numpy
from pylab import *
class GraficoClass(object):
'''
    Graph-generating class
'''
def __init__(self,caminhos=[os.path.join('..','resultado','aleatory'),os.path.join('..','resultado','sequential')], caminho = os.path.join('..','grafico')):
'''
        Initializes the graph-generating class with the result files that will be used to generate the graphs
'''
self.caminhos = caminhos;
self.caminho = caminho;
self.largura = 1440;
self.altura = 1080;
self.arquivos = []
self.getArquivos()
def getArquivos(self):
for caminho in self.caminhos:
self.arquivos += glob.glob(os.path.join(caminho,"*.txt"));
# def tempoDownload(self,nome='tempoxNdownload.svg'):
'''
Organiza dados de tempo x numero de dwonloads efetivados
x=[tempo1,tempo2, ...]
y=[nDownload, nDownload, ...]
total = {};
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
tempo,quantidade =[],[];
numero = 0
tempo.append('0');
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'T'):
tempo.append(lista[1]);
if lista[1] > 1:
quantidade.append(numero);
numero = 0
elif (lista[0] == 'D'):
while '' in lista:
lista.remove('');
if(len(lista) > 1):
indice = 3;
while(lista[indice]):
if (lista[indice] == '100'):
numero += 1;
try:
indice += 3;
lista[indice]
except:
break
quantidade.append(numero);
self.aberto.close();
total[arquivo] = quantidade
data = Series(total)
cairoplot.dot_line_plot(os.path.join(self.caminho,nome),data,self.largura,self.altura,x_labels = tempo,
axis = True, grid = True, x_title = "Tempo", y_title = "Nº de Downloads", series_legend=True);
'''
def tempoDownloadAcumulado(self,nome='tempoxNdownloadacum.svg'):
'''
        Organizes data as result file vs. number of completed downloads
        x=[file1,file2, ...]
        y=[nDownload, nDownload, ...]
'''
total = {};
dados = [];
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
quantidade =[];
numero = 0
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'D'):
while '' in lista:
lista.remove('');
if(len(lista) > 1):
indice = 3;
while(lista[indice]):
if (lista[indice] == '100'):
numero += 1;
try:
indice += 3;
lista[indice]
except:
break
quantidade.append(numero);
self.aberto.close();
dados.append(quantidade)
total[arquivo] = quantidade
mat = MatematicaClass(total)
        # building the new figure
organizados, N = mat.confianca()
N = 4
fig = plt.figure()
ax = fig.add_subplot(111)
nomesProntos, mediasProntas, ercProntos, erbProntos = [],[0],[0],[0]
cor, rects = [],[]
cm = get_cmap('gist_rainbow')
for i in range(N):
cor.append(cm(1.*i/N)) # color will now be an RGBA tuple
cor = iter(cor)
dictItems = {'sequentialaleatory':[],'aleatorysequential':[],'sequentialsequential':[],'aleatoryaleatory':[]}
for elementos in organizados.values():
for elemento in elementos:
item = ItemClass(elemento)
item.elemento = elementos[elemento]
dictItems[item.tipoDownload+item.tipoConsumo].append(item)
dictItems[item.tipoDownload+item.tipoConsumo].sort(key = lambda x: int(x.download))
for elementos in dictItems.values():
for elemento in elementos:
strNome = str(elemento.tipoDownload) + " Download"+ ", "+ str(elemento.tipoConsumo)+" Consumption"
if strNome not in nomesProntos:
nomesProntos.append(strNome)
media, (erc,erb) = elemento.elemento[-1]['confianca'][0]
media, erc, erb
mediasProntas.append(float("{0:.2f}".format(media)))
ercProntos.append(media-erc)
erbProntos.append(erb-media)
legendasX = [0,10,30,50,70,100]
rects.append(ax.errorbar(legendasX, mediasProntas, yerr=[erbProntos,ercProntos],color=cor.next(),fmt='--'))
mediasProntas, ercProntos, erbProntos = [0],[0],[0]
        # remaining axis formatting (labels, title, grid, legend)
ax.set_ylabel('Quantidade de Downloads Finalizados')
ax.set_xlabel('Taxa de Download')
ax.set_title('Download Acumulado')
ax.grid(True)
ax.set_xticks(legendasX)
ax.set_xticklabels(legendasX)
ax.set_ylim(bottom=-10,top=7000)
#ax.set_yscale('log')
leg = ax.legend(rects,nomesProntos,loc='upper center', bbox_to_anchor=(0.5,-0.1))
for t in leg.get_texts():
t.set_fontsize('small')
plt.savefig(os.path.join(self.caminho,nome), bbox_extra_artists=(leg,), bbox_inches='tight')
ax.set_yscale('log')
plt.savefig(os.path.join(self.caminho,'log'+nome), bbox_extra_artists=(leg,), bbox_inches='tight')
        # end of figure assembly
#cairoplot.horizontal_bar_plot(os.path.join(self.caminho,nome),dados,self.largura,self.altura, y_labels = self.arquivos,
# border = 20, display_values = True, rounded_corners = True, colors = [(0.5,0.2,0)]);
# def tempoConsumo(self,nome='tempoxNconsumo.svg'):
'''
Organiza dados de tempo x numero de consumos efetivados
x=[tempo1,tempo2, ...]
y=[nConsumo, nConsumo, ...]
total = {};
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
tempo,quantidade =[],[];
numero = 0
tempo.append('0');
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'T'):
tempo.append(lista[1]);
if lista[1] > 1:
quantidade.append(numero);
numero = 0
elif (lista[0] == 'C'):
while '' in lista:
lista.remove('');
if(len(lista) > 1):
indice = 3;
while(lista[indice]):
if (lista[indice] == '100'):
numero += 1;
try:
indice += 3;
lista[indice]
except:
break
quantidade.append(numero);
self.aberto.close();
total[arquivo] = quantidade
data = Series(total)
cairoplot.dot_line_plot(os.path.join(self.caminho,nome),data,self.largura,self.altura,x_labels = tempo,
axis = True, grid = True, x_title = "Tempo", y_title = "Nº de Consumo", series_legend=True);
'''
def tempoConsumoAcumulado(self,nome='tempoxNconsumoacum.svg'):
'''
        Organizes data as result file vs. number of completed consumptions
        x=[file1,file2, ...]
        y=[nConsumo, nConsumo, ...]
'''
total = {};
dados = [];
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
quantidade =[];
numero = 0
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'C'):
while '' in lista:
lista.remove('');
if(len(lista) > 1):
indice = 3;
while(lista[indice]):
if (lista[indice] == '100'):
numero += 1;
try:
indice += 3;
lista[indice]
except:
break
quantidade.append(numero);
self.aberto.close();
dados.append(quantidade)
total[arquivo] = quantidade
mat = MatematicaClass(total)
organizados, N = mat.confianca()
N = 4
fig = plt.figure()
ax = fig.add_subplot(111)
nomesProntos, mediasProntas, ercProntos, erbProntos = [],[0],[0],[0]
cor, rects = [],[]
cm = get_cmap('gist_rainbow')
for i in range(N):
cor.append(cm(1.*i/N)) # color will now be an RGBA tuple
cor = iter(cor)
dictItems = {'sequentialaleatory':[],'aleatorysequential':[],'sequentialsequential':[],'aleatoryaleatory':[]}
for elementos in organizados.values():
for elemento in elementos:
item = ItemClass(elemento)
item.elemento = elementos[elemento]
dictItems[item.tipoDownload+item.tipoConsumo].append(item)
dictItems[item.tipoDownload+item.tipoConsumo].sort(key = lambda x: int(x.download))
for elementos in dictItems.values():
for elemento in elementos:
strNome = str(elemento.tipoDownload) + " Download"+ ", "+ str(elemento.tipoConsumo)+" Consumption"
if strNome not in nomesProntos:
nomesProntos.append(strNome)
media, (erc,erb) = elemento.elemento[-1]['confianca'][0]
media, erc, erb
mediasProntas.append(float("{0:.2f}".format(media)))
ercProntos.append(media-erc)
erbProntos.append(erb-media)
legendasX = [0,10,30,50,70,100]
rects.append(ax.errorbar(legendasX, mediasProntas, yerr=[erbProntos,ercProntos],color=cor.next(),fmt='--o'))
mediasProntas, ercProntos, erbProntos = [0],[0],[0]
        # remaining axis formatting (labels, title, grid, legend)
ax.set_ylabel(u'Quantidade de Conteúdo Consumido')
ax.set_title('Consumo Acumulado ao Longo de 120 Timeslots')
ax.set_xlabel('Taxa de Download')
ax.grid(True)
ax.set_xticks(legendasX)
ax.set_xticklabels(legendasX)
ax.set_ylim(bottom=-10,top=7000)
#ax.set_yscale('log')
leg = ax.legend(rects,nomesProntos,loc='upper center', bbox_to_anchor=(0.5,-0.1))
for t in leg.get_texts():
t.set_fontsize('small')
plt.savefig(os.path.join(self.caminho,nome), bbox_extra_artists=(leg,), bbox_inches='tight')
ax.set_yscale('log')
plt.savefig(os.path.join(self.caminho,'log'+nome), bbox_extra_artists=(leg,), bbox_inches='tight')
# cairoplot.horizontal_bar_plot(os.path.join(self.caminho,'2'+nome),dados,self.largura,self.altura, y_labels = self.arquivos,
# border = 20, display_values = True, rounded_corners = True, colors = [(0.5,0.2,0)]);
# def tempoPeersSemDownload(self,nome='tempoxNsemdownload.svg'):
'''
Organiza dados de tempo x numero de peers sem fazer download naquele tempo
x=[tempo1,tempo2, ...]
y=[nPeers, nPeers, ...]
total = {};
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
tempo,quantidade =[],[];
numero = 0
tempo.append('0');
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'T'):
tempo.append(lista[1]);
if lista[1] > 1:
quantidade.append(numero);
numero = 0
elif (lista[0] == 'W'):
while '' in lista:
lista.remove('');
if(lista[1] == '1'):
numero += 1;
quantidade.append(numero);
self.aberto.close();
total[arquivo] = quantidade
data = Series(total)
cairoplot.dot_line_plot(os.path.join(self.caminho,nome),data,self.largura,self.altura,x_labels = tempo,
axis = True, grid = True, x_title = "Tempo", y_title = "Nº de Peers com Download Parado", series_legend=True);
'''
def tempoPeersSemDownloadAcumulado(self,nome='tempoxNsemdownloadacum.svg'):
'''
        Organizes data as result file vs. accumulated count of peers not downloading
        x=[file1,file2, ...]
        y=[nPeers, nPeers, ...]
'''
total = {};
dados = [];
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
quantidade =[];
numero = 0
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'W'):
while '' in lista:
lista.remove('');
if(lista[1] == '1'):
numero += 1;
quantidade.append(numero);
self.aberto.close();
dados.append(quantidade)
total[arquivo] = quantidade
mat = MatematicaClass(total)
organizados, N = mat.confianca()
N = 4
fig = plt.figure()
ax = fig.add_subplot(111)
nomesProntos, mediasProntas, ercProntos, erbProntos = [],[0],[0],[0]
cor, rects = [],[]
cm = get_cmap('gist_rainbow')
for i in range(N):
cor.append(cm(1.*i/N)) # color will now be an RGBA tuple
cor = iter(cor)
dictItems = {'sequentialaleatory':[],'aleatorysequential':[],'sequentialsequential':[],'aleatoryaleatory':[]}
for elementos in organizados.values():
for elemento in elementos:
item = ItemClass(elemento)
item.elemento = elementos[elemento]
dictItems[item.tipoDownload+item.tipoConsumo].append(item)
dictItems[item.tipoDownload+item.tipoConsumo].sort(key = lambda x: int(x.download))
for elementos in dictItems.values():
for elemento in elementos:
strNome = str(elemento.tipoDownload) + " Download"+ ", "+ str(elemento.tipoConsumo)+" Consumption"
if strNome not in nomesProntos:
nomesProntos.append(strNome)
media, (erc,erb) = elemento.elemento[-1]['confianca'][0]
media, erc, erb
mediasProntas.append(float("{0:.2f}".format(media)))
ercProntos.append(media-erc)
erbProntos.append(erb-media)
legendasX = [0,10,30,50,70,100]
rects.append(ax.errorbar(legendasX, mediasProntas, yerr=[erbProntos,ercProntos],color=cor.next(),fmt='--o'))
mediasProntas, ercProntos, erbProntos = [0],[0],[0]
        # remaining axis formatting (labels, title, grid, legend)
ax.set_ylabel('Tempo de Peers sem Download')
ax.set_title('Tempo Acumulado sem Download')
ax.set_xlabel('Taxa de Download')
ax.grid(True)
ax.set_xticks(legendasX)
ax.set_xticklabels(legendasX)
ax.set_ylim(bottom=-10,top=7000)
#ax.set_yscale('log')
leg = ax.legend(rects,nomesProntos,loc='upper center', bbox_to_anchor=(0.5,-0.1))
for t in leg.get_texts():
t.set_fontsize('small')
plt.savefig(os.path.join(self.caminho,nome), bbox_extra_artists=(leg,), bbox_inches='tight')
ax.set_yscale('log')
plt.savefig(os.path.join(self.caminho,'log'+nome), bbox_extra_artists=(leg,), bbox_inches='tight')
#cairoplot.horizontal_bar_plot(os.path.join(self.caminho,'2'+nome),dados,self.largura,self.altura, y_labels = self.arquivos,
# border = 20, display_values = True, rounded_corners = True, colors = [(0.5,0.2,0)]);
# def tempoPeersSemConsumo(self,nome='tempoxNsemconsumo.svg'):
'''
Organiza dados de tempo x numero de peers sem fazer consumo naquele tempo
x=[tempo1,tempo2, ...]
y=[nPeers, nPeers, ...]
total = {};
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
tempo,quantidade =[],[];
numero = 0
tempo.append('0');
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'T'):
tempo.append(lista[1]);
if lista[1] > 1:
quantidade.append(numero);
numero = 0
elif (lista[0] == 'S'):
while '' in lista:
lista.remove('');
if(lista[1] == '1'):
numero += 1;
quantidade.append(numero);
self.aberto.close();
total[arquivo] = quantidade
data = Series(total)
cairoplot.dot_line_plot(os.path.join(self.caminho,nome),data,self.largura,self.altura,x_labels = tempo,
axis = True, grid = True, x_title = "Tempo", y_title = "Nº de Peers com Consumo Parado", series_legend=True);
'''
def tempoPeersSemConsumoAcumulado(self,nome='tempoxNsemconsumoacum.svg'):
'''
        Organizes data as result file vs. accumulated count of peers not consuming
        x=[file1,file2, ...]
        y=[nPeers, nPeers, ...]
'''
total = {};
dados = [];
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
quantidade =[];
numero = 0
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'S'):
while '' in lista:
lista.remove('');
if(lista[1] == '1'):
numero += 1;
quantidade.append(numero);
self.aberto.close();
dados.append(quantidade)
total[arquivo] = quantidade
mat = MatematicaClass(total)
organizados, N = mat.confianca()
N = 4
fig = plt.figure()
ax = fig.add_subplot(111)
nomesProntos, mediasProntas, ercProntos, erbProntos = [],[0],[0],[0]
cor, rects = [],[]
cm = get_cmap('gist_rainbow')
for i in range(N):
cor.append(cm(1.*i/N)) # color will now be an RGBA tuple
cor = iter(cor)
dictItems = {'sequentialaleatory':[],'aleatorysequential':[],'sequentialsequential':[],'aleatoryaleatory':[]}
for elementos in organizados.values():
for elemento in elementos:
item = ItemClass(elemento)
item.elemento = elementos[elemento]
dictItems[item.tipoDownload+item.tipoConsumo].append(item)
dictItems[item.tipoDownload+item.tipoConsumo].sort(key = lambda x: int(x.download))
for elementos in dictItems.values():
for elemento in elementos:
strNome = str(elemento.tipoDownload) + " Download"+ ", "+ str(elemento.tipoConsumo)+" Consumption"
if strNome not in nomesProntos:
nomesProntos.append(strNome)
nomesProntos = list(nomesProntos)
media, (erc,erb) = elemento.elemento[-1]['confianca'][0]
media, erc, erb
mediasProntas.append(float("{0:.2f}".format(media)))
ercProntos.append(media-erc)
erbProntos.append(erb-media)
legendasX = [0,10,30,50,70,100]
rects.append(ax.errorbar(legendasX, mediasProntas, yerr=[erbProntos,ercProntos],color=cor.next(),fmt='--o'))
mediasProntas, ercProntos, erbProntos = [0],[0],[0]
        # remaining axis formatting (labels, title, grid, legend)
ax.set_ylabel('Tempo Bloqueado (Tempo de Peers sem Consumo)')
ax.set_title('Tempo Acumulado sem Consumo ao Longo de 120 Timeslots')
ax.set_xlabel('Taxa de Download')
ax.grid(True)
ax.set_xticks(legendasX)
ax.set_xticklabels(legendasX)
ax.set_ylim(bottom=-10,top=7000)
#ax.set_yscale('log')
leg = ax.legend(rects,nomesProntos,loc='upper center', bbox_to_anchor=(0.5,-0.1))
for t in leg.get_texts():
t.set_fontsize('small')
plt.savefig(os.path.join(self.caminho,nome), bbox_extra_artists=(leg,), bbox_inches='tight')
ax.set_yscale('log')
plt.savefig(os.path.join(self.caminho,'log'+nome), bbox_extra_artists=(leg,), bbox_inches='tight')
# cairoplot.horizontal_bar_plot(os.path.join(self.caminho,'2'+nome),dados,self.largura,self.altura, y_labels = self.arquivos,
# border = 20, display_values = True, rounded_corners = True, colors = [(0.5,0.2,0)]);
# def parcelaDownloadAcumulado(self,nome='parcelaxDownloadacum.svg'):
'''
Organiza dados de situacao (pareamento com download ou nao) x numero vezes
x=[situacao1,situacao2, ...]
y=[nVezes, nVezes, ...]
total = {};
for arquivo in self.arquivos:
self.aberto = open(arquivo,"r");
quantidadeS,quantidadeC =[],[];
comPar,semPar = 0,0
for linha in self.aberto:
linha = linha.rstrip();
lista = linha.split(',');
if (lista[0] == 'W'):
while '' in lista:
lista.remove('');
if(lista[1] == '1'):
comPar += 1;
elif(lista[1] == '0'):
semPar +=1;
quantidadeS.append(semPar);
quantidadeC.append(comPar);
self.aberto.close();
total['SemPar'+arquivo] = quantidadeS
total['ComPar'+arquivo] = quantidadeC
data = Series(total)
background = cairo.LinearGradient(300, 0, 300, 400)
background.add_color_stop_rgb(0,0.4,0.4,0.4)
background.add_color_stop_rgb(1.0,0.1,0.1,0.1)
try:
cairoplot.donut_plot(os.path.join(self.caminho,nome),data,self.largura,self.altura,
background = background, gradient = True, shadow = True, inner_radius = 0.3);
except:
pass
'''
class ItemClass(object):
'''
    Graph item class, where the items and their attributes are defined
'''
def __init__(self,nomeArquivo):
'''
        Stores the file name whose attributes will be split out
'''
self.nomeArquivo = nomeArquivo
self.separaAtributos()
def separaAtributos(self):
'''
        Method that splits the attributes out of the file name
'''
self.tipoDownload = self.nomeArquivo[0:self.nomeArquivo.find("_")]
self.peers = self.nomeArquivo[self.nomeArquivo.find("_Q")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_Q")+2)]
self.playlist = self.nomeArquivo[self.nomeArquivo.find("_P")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_P")+2)]
self.lista = self.nomeArquivo[self.nomeArquivo.find("_L")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_L")+2)]
self.listaTotal = self.nomeArquivo[self.nomeArquivo.find("_S")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_S")+2)]
self.consumo = self.nomeArquivo[self.nomeArquivo.find("_R")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_R")+2)]
self.timeslot = self.nomeArquivo[self.nomeArquivo.find("_V")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_V")+2)]
self.tipoConsumo = self.nomeArquivo[self.nomeArquivo.find("_Y")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_Y")+2)]
self.download = self.nomeArquivo[self.nomeArquivo.find("_X")+2:self.nomeArquivo.find("_",self.nomeArquivo.find("_X")+2)]
self.delay = self.nomeArquivo[self.nomeArquivo.find("_D")+2:self.nomeArquivo.find(".txt")]
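# Added illustration (not part of the original class): the slicing in
# separaAtributos assumes result-file names that tag each parameter with a
# one-letter marker.  The concrete name below is invented to show the assumed
# layout, which is inferred from the slicing above rather than documented.
def _example_parse_filename():
    item = ItemClass('aleatory_Q100_P10_L5_S50_R1_V120_Ysequential_X30_D0.txt')
    # item.tipoDownload == 'aleatory', item.peers == '100',
    # item.tipoConsumo == 'sequential', item.download == '30', item.delay == '0'
    return item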
| gpl-3.0 |
vermouthmjl/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
gxx/lettuce | tests/integration/django/dill/leaves/features/steps.py | 17 | 1432 | import json
from django.core.management import call_command
from leaves.models import *
from lettuce import after, step
from lettuce.django.steps.models import *
from nose.tools import assert_equals
after.each_scenario(clean_db)
max_rego = 0
@creates_models(Harvester)
def create_with_rego(step):
data = hashes_data(step)
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
create_models(Harvester, data)
@checks_existence(Harvester)
def check_with_rego(step):
data = hashes_data(step)
for hash_ in data:
try:
hash_['rego'] = hash_['rego'].upper()
except KeyError:
pass
models_exist(Harvester, data)
@step(r'The database dump is as follows')
def database_dump(step):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
output = StringIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print "Harvester count: %d" % Harvester.objects.count()
@creates_models(Panda)
def create_pandas(step):
data = hashes_data(step)
if 'name' in data:
data['name'] += ' Panda'
return create_models(Panda, data)
| gpl-3.0 |
mathemage/h2o-3 | h2o-py/tests/testdir_misc/pyunit_pivot_medium.py | 5 | 2462 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Pyunit for h2o.pivot"""
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
from builtins import range
import h2o
import pandas
from tests import pyunit_utils
def pivot():
df = h2o.create_frame(rows=1000000,
cols=3,
factors=10,
categorical_fraction=1.0/3,
time_fraction=1.0/3,
real_fraction=1.0/3,
real_range=100,
missing_fraction=0.0,
seed=123)
# Pandas comparison
pdf = df.as_data_frame()
ppdf = pdf.pivot(values="C3",index="C1",columns="C2")
ppdf = ppdf.fillna(0.0)
ppdfh2o = h2o.H2OFrame(ppdf)
pivoted = df.pivot(index="C1",column="C2",value="C3")
pivoted.impute(values=[0]*11)
assert abs((pivoted[:,1:11] - ppdfh2o).sum(return_frame=False)) < 1e-11, "Difference between Pandas pivot too high"
print("Testing size: ")
for s in [100,200,300,400,500,1000,2000,4211,100000]:
print(str(s))
df1 = h2o.H2OFrame({"index":list(range(1,s+1)),"column":["a"]*s,"value":[1]*s})
df2 = h2o.H2OFrame({"index":list(range(1,s+1)),"column":["b"]*s,"value":[2]*s})
df3 = h2o.H2OFrame({"index":list(range(1,s+1)),"column":["c"]*s,"value":[3]*s})
dfall = df1.rbind(df2)
dfall = dfall.rbind(df3)
res = dfall.pivot(index="index", column="column", value="value")
assert res["a"].sum()[0,0] == 1*s, "Wrong sum of 'a' column"
assert res["b"].sum()[0,0] == 2*s, "Wrong sum of 'b' column"
assert res["c"].sum()[0,0] == 3*s, "Wrong sum of 'c' column"
# See if it can find the last label
df = h2o.create_frame(rows=1000001,randomize=False,integer_fraction=0.0,categorical_fraction=0.0,time_fraction=0.0,cols=2,value=1.0,missing_fraction=0.0)
df2 = h2o.create_frame(rows=1000000,integer_fraction=0.0,categorical_fraction=1.0,time_fraction=0.0,cols=1,factors=2,missing_fraction=0.0)
df2b = df2.rbind(h2o.H2OFrame({"C1":"b"}))
df2b.set_names(["C3"])
dft = df.cbind(df2b)
p = dft.pivot(index="C1",value="C2",column="C3")
assert len(p.columns) == 4, "Wrong number of columns for last label test"
assert len(p) == 1, "Wrong number of rows for last label test"
if __name__ == "__main__":
pyunit_utils.standalone_test(pivot)
else:
pivot()
| apache-2.0 |
ilo10/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pie_and_polar_charts/pie_demo_features.py | 3 | 1070 | """
Demo of a basic pie chart plus a few additional features.
In addition to the basic pie chart, this demo shows a few optional features:
* slice labels
* auto-labeling the percentage
* offsetting a slice with "explode"
* drop-shadow
* custom start angle
Note about the custom start angle:
The default ``startangle`` is 0, which would start the "Frogs" slice on the
positive x-axis. This example sets ``startangle = 90`` such that everything is
rotated counter-clockwise by 90 degrees, and the frog slice starts on the
positive y-axis.
"""
import matplotlib.pyplot as plt
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
plt.show()
| apache-2.0 |
gargleblaster/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
idx = idx[::-1] # in ascending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
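# Added illustration (not part of the original module): a hedged usage sketch
# of pca() on a small random price panel.  Symbols and data are invented.
def _example_pca():
    rs = np.random.RandomState(0)
    prices = DataFrame(rs.randn(100, 3).cumsum(axis=0) + 100.0,
                       columns=['AAA', 'BBB', 'CCC'])
    coeff, transform, latent = pca(prices)
    # coeff: 3x3 loadings, transform: 100x3 scores, latent: component variances
    return coeff, transform, latent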
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
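# Added illustration (not part of the original module): a hedged pos2pnl sketch
# with invented prices -- buy one share at bar 1, sell at bar 3.  The first row
# of the result is NaN because of the position diff; once the trade is closed
# the 'total' column equals the captured price move (+2 here).
def _example_pos2pnl():
    price = Series([10.0, 10.0, 11.0, 12.0, 12.5])
    position = Series([0, 1, 1, 0, 0])
    return pos2pnl(price, position)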
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
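# Added illustration (not part of the original module): a hedged tradeBracket
# sketch on an invented price path.  The price first leaves the +/-2 band at
# bar 4 (103 vs. an entry price of 100), so the call returns (3.0, 4).
def _example_tradeBracket():
    price = np.array([100.0, 101.0, 99.5, 101.5, 103.0, 104.0])
    return tradeBracket(price, entryBar=0, maxTradeLength=10, bracket=2.0)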
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
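# Added illustration (not part of the original module): a hedged estimateBeta
# sketch on synthetic data.  The stock's log-returns are generated as 1.5x the
# market's plus small noise, so the 'standard' estimator should come out close
# to 1.5.
def _example_estimateBeta():
    rs = np.random.RandomState(42)
    market_ret = rs.normal(0.0, 0.01, 500)
    stock_ret = 1.5 * market_ret + rs.normal(0.0, 0.002, 500)
    priceX = Series(100.0 * np.exp(np.cumsum(market_ret)))
    priceY = Series(50.0 * np.exp(np.cumsum(stock_ret)))
    return estimateBeta(priceY, priceX, algo='standard')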
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
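# Added illustration (not part of the original module): a hedged drawdown
# sketch on a hand-made equity curve.  The curve peaks at 3, falls to 1 and
# recovers, so the maximum drawdown is 2.0 and the longest underwater stretch
# lasts 3 bars.
def _example_drawdown():
    equity = pd.Series([1.0, 2.0, 3.0, 2.0, 1.0, 2.5, 3.5])
    dd, duration = drawdown(equity)
    return dd.max(), duration.max()   # (2.0, 3.0)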
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
    candlestick(df)
 | bsd-3-clause |
JanetMatsen/Machine_Learning_CSE_546 | HW1/Q6_least_squares/ridge_regression.py | 1 | 2679 | import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg as splin
class Ridge:
def __init__(self, X, y, lam):
assert type(X) == sp.csc_matrix or type(X) == sp.csr_matrix
assert type(lam*1.0) == float
assert type(y) == sp.csr_matrix or type(y) == sp.csc_matrix
self.X = X
self.N = X.shape[0]
self.y = y
self.lam = lam
self.w = None
def solve(self):
d = self.X.shape[1] # d = number of features/columns
# find lambda*I_D + X^T*X
piece_to_invert = sp.identity(d)*self.lam + self.X.T.dot(self.X)
inverted_piece = splin.inv(piece_to_invert)
solution = inverted_piece.dot(self.X.T)
solution = solution.dot(self.y)
self.w = solution
def sse(self):
# sse = RSS
error_v = self.X.dot(self.w) - self.y
return self.extract_scalar(error_v.T.dot(error_v))
def rmse(self):
return(self.sse()/self.N)**0.5
@staticmethod
def extract_scalar(m):
assert(m.shape == (1, 1))
return m[0, 0]
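# Added illustration (not part of the assignment code): a hedged usage sketch
# of the Ridge class above on random sparse data.  solve() implements the
# closed form w = (lambda*I + X^T X)^{-1} X^T y.
def _example_ridge():
    rng = np.random.RandomState(0)
    X = sp.csc_matrix(rng.randn(50, 5))
    w_true = np.arange(1.0, 6.0).reshape(5, 1)
    y = sp.csr_matrix(X.dot(w_true) + 0.01 * rng.randn(50, 1))
    model = Ridge(X, y, lam=0.1)
    model.solve()
    return model.w.toarray(), model.rmse()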
class RidgeRegularizationPath:
def __init__(self, train_X, train_y, lam_max, frac_decrease, steps,
val_X, val_y):
self.train_X = train_X
self.train_y = train_y
self.train_N, self.train_d = train_X.shape
self.lam_max = lam_max
self.frac_decrease = frac_decrease
self.steps = steps
self.val_X = val_X
self.val_y = val_y
def train_with_lam(self, lam):
rr = Ridge(self.train_X, self.train_y, lam=lam)
rr.solve()
sse_train = rr.sse()
        # swap in the validation X and y and compute the validation SSE
rr.X = self.val_X
rr.y = self.val_y
sse_val = rr.sse()
assert rr.w.shape == (self.train_d, 1) # check before we slice out
return rr.w.toarray()[:,0], sse_train, sse_val
def walk_path(self):
# protect the first value of lambda.
lam = self.lam_max/self.frac_decrease
# initialize a dataframe to store results in
results = pd.DataFrame()
for c in range(0, self.steps):
lam = lam*self.frac_decrease
print("Loop {}: solving weights. Lambda = {}".format(c+1, lam))
w, sse_train, sse_val = self.train_with_lam(lam)
one_val = pd.DataFrame({"lam":[lam],
"weights":[w],
"SSE (training)": [sse_train],
"SSE (validaton)": [sse_val]})
results = pd.concat([results, one_val])
self.results_df = results
| mit |
antoinecarme/pyaf | tests/neuralnet/test_ozone_GPU.py | 1 | 1962 |
def pickleModel(iModel):
import pickle
output = pickle.dumps(iModel)
lReloadedObject = pickle.loads(output)
output2 = pickle.dumps(lReloadedObject)
assert(iModel.to_json() == lReloadedObject.to_json())
return lReloadedObject;
def buildModel(iParallel = True):
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import logging
import logging.config
# logging.config.fileConfig('logging.conf')
logging.basicConfig(level=logging.INFO)
# get_ipython().magic('matplotlib inline')
b1 = tsds.load_ozone()
df = b1.mPastData
# df.tail(10)
# df[:-10].tail()
# df[:-10:-1]
# df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mParallelMode = iParallel;
lEngine.mOptions.set_active_autoregressions(['MLP' , 'LSTM']);
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine2 = pickleModel(lEngine)
lEngine2.getModelInfo();
print(lEngine2.mSignalDecomposition.mTrPerfDetails.head());
lEngine2.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine2.standardPlots("outputs/my_rnn_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine2.forecast(dfapp_in, H);
# dfapp_out.to_csv("outputs/rnn_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine2.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/tseries/tests/test_resample.py | 3 | 3327 | from itertools import product
import pandas as pd
import pytest
from dask.dataframe.utils import assert_eq
import dask.dataframe as dd
def resample(df, freq, how='mean', **kwargs):
return getattr(df.resample(freq, **kwargs), how)()
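# Added note (illustration only): this helper just dispatches by name, e.g.
# resample(ddf, '1h', how='count', closed='right') is equivalent to
# ddf.resample('1h', closed='right').count(), which lets the parametrized
# tests below drive any aggregation method from a string.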
@pytest.mark.parametrize(['obj', 'method', 'npartitions', 'freq', 'closed', 'label'],
list(product(['series', 'frame'],
['count', 'mean', 'ohlc'],
[2, 5],
['30T', 'h', 'd', 'w', 'M'],
['right', 'left'],
['right', 'left'])))
def test_series_resample(obj, method, npartitions, freq, closed, label):
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
if obj == 'series':
ps = pd.Series(range(len(index)), index=index)
elif obj == 'frame':
ps = pd.DataFrame({'a':range(len(index))}, index=index)
ds = dd.from_pandas(ps, npartitions=npartitions)
# Series output
result = resample(ds, freq, how=method, closed=closed, label=label)
expected = resample(ps, freq, how=method, closed=closed, label=label)
assert_eq(result, expected, check_dtype=False)
divisions = result.divisions
assert expected.index[0] == divisions[0]
assert expected.index[-1] == divisions[-1]
def test_resample_agg():
index = pd.date_range('2000-01-01', '2000-02-15', freq='h')
ps = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(ps, npartitions=2)
assert_eq(ds.resample('10min').agg('mean'),
ps.resample('10min').agg('mean'))
assert_eq(ds.resample('10min').agg(['mean', 'min']),
ps.resample('10min').agg(['mean', 'min']))
def test_resample_agg_passes_kwargs():
index = pd.date_range('2000-01-01', '2000-02-15', freq='h')
ps = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(ps, npartitions=2)
def foo(series, bar=1, *args, **kwargs):
return bar
assert_eq(ds.resample('2h').agg(foo, bar=2),
ps.resample('2h').agg(foo, bar=2))
assert (ds.resample('2h').agg(foo, bar=2) == 2).compute().all()
def test_series_resample_not_implemented():
index = pd.date_range(start='2012-01-02', periods=100, freq='T')
s = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(s, npartitions=5)
# Frequency doesn't evenly divide day
pytest.raises(NotImplementedError, lambda: resample(ds, '57T'))
def test_unknown_divisions_error():
df = pd.DataFrame({'x': [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2, sort=False)
try:
ddf.x.resample('1m').mean()
assert False
except ValueError as e:
assert 'divisions' in str(e)
def test_resample_index_name():
import numpy as np
from datetime import datetime, timedelta
date_today = datetime.now()
days = pd.date_range(date_today, date_today + timedelta(20), freq='D')
data = np.random.randint(1, high=100, size=len(days))
df = pd.DataFrame({'date': days, 'values': data})
df = df.set_index('date')
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.resample('D').mean().head().index.name == "date"
| gpl-3.0 |
MartialD/hyperspy | hyperspy/samfire_utils/samfire_worker.py | 4 | 11559 | # Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import sys
from itertools import combinations, product
from queue import Empty
import dill
import numpy as np
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
from hyperspy.signal import BaseSignal
from hyperspy.utils.model_selection import AICc
_logger = logging.getLogger(__name__)
class Worker:
def __init__(self, identity, individual_queue=None, shared_queue=None,
result_queue=None):
self.identity = identity
self.individual_queue = individual_queue
self.shared_queue = shared_queue
self.result_queue = result_queue
self.timestep = 0.001
self.max_get_timeout = 3
self._AICc_fraction = 0.99
self.reset()
self.last_time = 1
self.optional_names = set()
self.model = None
self.parameters = {}
def create_model(self, signal_dict, model_letter):
_logger.debug('Creating model in worker {}'.format(self.identity))
sig = BaseSignal(**signal_dict)
sig._assign_subclass()
self.model = sig.models[model_letter].restore()
for component in self.model:
component.active_is_multidimensional = False
component.active = True
for par in component.parameters:
par.map = par.map.copy()
if self.model.signal.metadata.has_item(
'Signal.Noise_properties.variance'):
var = self.model.signal.metadata.Signal.Noise_properties.variance
if isinstance(var, BaseSignal):
var.data = var.data.copy()
self._array_views_to_copies()
def _array_views_to_copies(self):
dct = self.model.__dict__
self.parameters = {}
for k, v in dct.items():
if isinstance(v, BaseSignal):
v.data = v.data.copy()
if k not in ['signal', 'image', 'spectrum'] and not \
k.startswith('_'):
self.parameters[k] = None
if isinstance(v, np.ndarray):
dct[k] = v.copy()
def set_optional_names(self, optional_names):
self.optional_names = optional_names
_logger.debug('Setting optional names in worker {} to '
'{}'.format(self.identity, self.optional_names))
def set_parameter_boundaries(self, received):
for rec, comp in zip(received, self.model):
for (bmin, bmax), par in zip(rec, comp.parameters):
par.bmin = bmin
par.bmax = bmax
def generate_values_iterator(self, turned_on_names):
tmp = []
name_list = []
for _comp_n, _comp in self.value_dict.items():
for par_n, par in _comp.items():
if _comp_n not in turned_on_names:
par = [None, ]
if not isinstance(par, list):
par = [par, ]
tmp.append(par)
name_list.append((_comp_n, par_n))
return name_list, product(*tmp)
def set_values(self, name_list, iterator):
for value_combination in iterator:
for (comp_name, parameter_name), value in zip(name_list,
value_combination):
if value is None:
self.model[comp_name].active = False
else:
self.model[comp_name].active = True
try:
getattr(self.model[comp_name],
parameter_name).value = value
except BaseException:
e = sys.exc_info()[0]
to_send = ('Error',
(self.identity,
'Setting {}.{} value to {}. '
'Caught:\n{}'.format(comp_name,
parameter_name,
value,
e)
)
)
if self.result_queue is None:
return to_send
else:
                        self.result_queue.put(to_send)
return
yield
def fit(self, component_comb):
name_list, iterator = self.generate_values_iterator(component_comb)
good_fit = False
for _ in self.set_values(name_list, iterator):
self.model.fit(**self.fitting_kwargs)
good_fit = self.fit_test.test(self.model, (0,))
if good_fit:
break
return good_fit
def generate_component_combinations(self):
all_names = {component.name for component in self.model}
names_to_skip_generators = [combinations(self.optional_names, howmany)
for howmany in
range(len(self.optional_names) + 1)]
names_to_skip = []
for _gen in names_to_skip_generators:
names_to_skip.extend(list(_gen))
for name_comb in names_to_skip:
yield all_names - set(name_comb)
def reset(self):
self.best_AICc = np.inf
self.best_values = []
self.best_dof = np.inf
def run_pixel(self, ind, value_dict):
self.reset()
self.ind = ind
self.value_dict = value_dict
self.fitting_kwargs = self.value_dict.pop('fitting_kwargs', {})
if 'min_function' in self.fitting_kwargs:
self.fitting_kwargs['min_function'] = dill.loads(
self.fitting_kwargs['min_function'])
if 'min_function_grad' in self.fitting_kwargs and isinstance(
self.fitting_kwargs['min_function_grad'], bytes):
self.fitting_kwargs['min_function_grad'] = dill.loads(
self.fitting_kwargs['min_function_grad'])
self.model.signal.data[:] = self.value_dict.pop('signal.data')
if self.model.signal.metadata.has_item(
'Signal.Noise_properties.variance'):
var = self.model.signal.metadata.Signal.Noise_properties.variance
if isinstance(var, BaseSignal):
var.data[:] = self.value_dict.pop('variance.data')
if 'low_loss.data' in self.value_dict:
self.model.low_loss.data[:] = self.value_dict.pop('low_loss.data')
for component_comb in self.generate_component_combinations():
good_fit = self.fit(component_comb)
if good_fit:
if len(self.optional_names) == 0:
return self.send_results(current=True)
else:
self.compare_models()
return self.send_results()
def _collect_values(self):
result = {component.name: {parameter.name: parameter.map.copy() for
parameter in component.parameters} for
component in self.model if component.active}
return result
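    # Added note (not in the original source): compare_models below keeps a
    # candidate fit when its corrected Akaike criterion (AICc) beats the best
    # value so far by more than the _AICc_fraction margin, or when the two
    # AICc values are within that margin and the candidate uses fewer free
    # parameters.  AICc itself comes from hyperspy.utils.model_selection.AICc.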
def compare_models(self):
new_AICc = AICc(self.model)
AICc_test = new_AICc < (self._AICc_fraction * self.best_AICc)
AICc_absolute_test = np.abs(new_AICc - self.best_AICc) <= \
np.abs(self._AICc_fraction * self.best_AICc)
dof_test = len(self.model.p0) < self.best_dof
if AICc_test or AICc_absolute_test and dof_test:
self.best_values = self._collect_values()
self.best_AICc = new_AICc
self.best_dof = len(self.model.p0)
for k in self.parameters.keys():
self.parameters[k] = getattr(self.model, k).data[0]
def send_results(self, current=False):
if current:
self.best_values = self._collect_values()
for k in self.parameters.keys():
self.parameters[k] = getattr(self.model, k).data[0]
if len(self.best_values): # i.e. we have a good result
_logger.debug('we have a good result in worker '
'{}'.format(self.identity))
result = {k + '.data': np.array(v) for k, v in
self.parameters.items()}
result['components'] = self.best_values
found_solution = True
else:
_logger.debug("we don't have a good result in worker "
"{}".format(self.identity))
result = None
found_solution = False
to_send = ('result', (self.identity, self.ind, result, found_solution))
if self.individual_queue is None:
return to_send
self.result_queue.put(to_send)
def setup_test(self, test_string):
self.fit_test = dill.loads(test_string)
def start_listening(self):
self._listening = True
self.listen()
def stop_listening(self):
self._listening = False
def parse(self, result):
function = result
arguments = []
if isinstance(result, tuple):
function, arguments = result
getattr(self, function)(*arguments)
def ping(self, message=None):
to_send = ('pong', (self.identity, os.getpid(), time.time(), message))
if self.result_queue is None:
return to_send
self.result_queue.put(to_send)
def sleep(self, howlong=None):
if howlong is None:
howlong = self.timestep
self.last_time = time.time()
time.sleep(howlong)
def change_timestep(self, value):
self.timestep = value
def listen(self):
while self._listening:
queue = None
found_what_to_do = False
time_diff = time.time() - self.last_time
if time_diff >= self.timestep:
if not self.individual_queue.empty():
queue = self.individual_queue
elif (self.shared_queue is not None and not
self.shared_queue.empty()):
queue = self.shared_queue
if queue is not None:
try:
result = queue.get(block=True,
timeout=self.max_get_timeout)
found_what_to_do = True
self.parse(result)
except Empty:
pass
if not found_what_to_do:
self.sleep()
else:
self.sleep()
def create_worker(identity, individual_queue=None,
shared_queue=None, result_queue=None):
w = Worker(identity, individual_queue, shared_queue, result_queue)
if individual_queue is None:
return w
w.start_listening()
return 1
| gpl-3.0 |
olologin/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
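# Example (sketch): for an already fitted estimator `est` and a test matrix
# `X_test` (names illustrative), `atomic, bulk = benchmark_estimator(est, X_test)`
# returns two arrays of per-prediction latencies in seconds.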
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict with an 'estimators' list used to label the boxplots
    """
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
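# Worked example of the throughput figure returned above: an estimator that
# completes 50 single-row predict() calls before the 0.1 s budget elapses is
# reported as 50 / 0.1 = 500 predictions/sec.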
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
tartavull/google-cloud-python | monitoring/tests/unit/test__dataframe.py | 4 | 8271 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pandas
except ImportError:
HAVE_PANDAS = False
else:
HAVE_PANDAS = True # pragma: NO COVER
import unittest
PROJECT = 'my-project'
INSTANCE_NAMES = ['instance-1', 'instance-2']
INSTANCE_ZONES = ['us-east1-a', 'us-east1-b']
INSTANCE_IDS = ['1234567890123456789', '9876543210987654321']
METRIC_TYPE = 'compute.googleapis.com/instance/cpu/utilization'
METRIC_LABELS = list({'instance_name': name} for name in INSTANCE_NAMES)
RESOURCE_TYPE = 'gce_instance'
RESOURCE_LABELS = list({
'project_id': PROJECT,
'zone': zone,
'instance_id': instance_id,
} for zone, instance_id in zip(INSTANCE_ZONES, INSTANCE_IDS))
METRIC_KIND = 'GAUGE'
VALUE_TYPE = 'DOUBLE'
TIMESTAMPS = [
'2016-04-06T22:05:00.042Z',
'2016-04-06T22:05:01.042Z',
'2016-04-06T22:05:02.042Z',
]
DIMENSIONS = len(TIMESTAMPS), len(INSTANCE_NAMES)
VALUES = list(0.1 * i for i in range(DIMENSIONS[1]))
ARRAY = [VALUES] * DIMENSIONS[0]
def parse_timestamps(): # pragma: NO COVER
import datetime
from google.cloud._helpers import _RFC3339_MICROS
return [datetime.datetime.strptime(t, _RFC3339_MICROS)
for t in TIMESTAMPS]
def generate_query_results(): # pragma: NO COVER
from google.cloud.monitoring.metric import Metric
from google.cloud.monitoring.resource import Resource
from google.cloud.monitoring.timeseries import Point
from google.cloud.monitoring.timeseries import TimeSeries
def P(timestamp, value):
return Point(
start_time=timestamp,
end_time=timestamp,
value=value,
)
for metric_labels, resource_labels, value in zip(
METRIC_LABELS, RESOURCE_LABELS, VALUES):
yield TimeSeries(
metric=Metric(type=METRIC_TYPE, labels=metric_labels),
resource=Resource(type=RESOURCE_TYPE, labels=resource_labels),
metric_kind=METRIC_KIND,
value_type=VALUE_TYPE,
points=[P(t, value) for t in TIMESTAMPS],
)
@unittest.skipUnless(HAVE_PANDAS, 'No pandas')
class Test__build_dataframe(unittest.TestCase): # pragma: NO COVER
def _call_fut(self, *args, **kwargs):
from google.cloud.monitoring._dataframe import _build_dataframe
return _build_dataframe(*args, **kwargs)
def test_both_label_and_labels_illegal(self):
with self.assertRaises(ValueError):
self._call_fut([], label='instance_name', labels=['zone'])
def test_empty_labels_illegal(self):
with self.assertRaises(ValueError):
self._call_fut([], labels=[])
def test_simple_label(self):
iterable = generate_query_results()
dataframe = self._call_fut(iterable, label='instance_name')
self.assertEqual(dataframe.shape, DIMENSIONS)
self.assertEqual(dataframe.values.tolist(), ARRAY)
self.assertEqual(list(dataframe.columns), INSTANCE_NAMES)
self.assertIsNone(dataframe.columns.name)
self.assertEqual(list(dataframe.index), parse_timestamps())
self.assertIsNone(dataframe.index.name)
def test_multiple_labels(self):
NAMES = ['resource_type', 'instance_id']
iterable = generate_query_results()
dataframe = self._call_fut(iterable, labels=NAMES)
self.assertEqual(dataframe.shape, DIMENSIONS)
self.assertEqual(dataframe.values.tolist(), ARRAY)
expected_headers = [(RESOURCE_TYPE, instance_id)
for instance_id in INSTANCE_IDS]
self.assertEqual(list(dataframe.columns), expected_headers)
self.assertEqual(dataframe.columns.names, NAMES)
self.assertIsNone(dataframe.columns.name)
self.assertEqual(list(dataframe.index), parse_timestamps())
self.assertIsNone(dataframe.index.name)
def test_multiple_labels_with_just_one(self):
NAME = 'instance_id'
NAMES = [NAME]
iterable = generate_query_results()
dataframe = self._call_fut(iterable, labels=NAMES)
self.assertEqual(dataframe.shape, DIMENSIONS)
self.assertEqual(dataframe.values.tolist(), ARRAY)
self.assertEqual(list(dataframe.columns), INSTANCE_IDS)
self.assertEqual(dataframe.columns.names, NAMES)
self.assertEqual(dataframe.columns.name, NAME)
self.assertEqual(list(dataframe.index), parse_timestamps())
self.assertIsNone(dataframe.index.name)
def test_smart_labels(self):
NAMES = ['resource_type', 'project_id',
'zone', 'instance_id',
'instance_name']
iterable = generate_query_results()
dataframe = self._call_fut(iterable)
self.assertEqual(dataframe.shape, DIMENSIONS)
self.assertEqual(dataframe.values.tolist(), ARRAY)
expected_headers = [
(RESOURCE_TYPE, PROJECT, zone, instance_id, instance_name)
for zone, instance_id, instance_name
in zip(INSTANCE_ZONES, INSTANCE_IDS, INSTANCE_NAMES)]
self.assertEqual(list(dataframe.columns), expected_headers)
self.assertEqual(dataframe.columns.names, NAMES)
self.assertIsNone(dataframe.columns.name)
self.assertEqual(list(dataframe.index), parse_timestamps())
self.assertIsNone(dataframe.index.name)
def test_empty_table_simple_label(self):
dataframe = self._call_fut([], label='instance_name')
self.assertEqual(dataframe.shape, (0, 0))
self.assertIsNone(dataframe.columns.name)
self.assertIsNone(dataframe.index.name)
self.assertIsInstance(dataframe.index, pandas.DatetimeIndex)
def test_empty_table_multiple_labels(self):
NAMES = ['resource_type', 'instance_id']
dataframe = self._call_fut([], labels=NAMES)
self.assertEqual(dataframe.shape, (0, 0))
self.assertEqual(dataframe.columns.names, NAMES)
self.assertIsNone(dataframe.columns.name)
self.assertIsNone(dataframe.index.name)
self.assertIsInstance(dataframe.index, pandas.DatetimeIndex)
def test_empty_table_multiple_labels_with_just_one(self):
NAME = 'instance_id'
NAMES = [NAME]
dataframe = self._call_fut([], labels=NAMES)
self.assertEqual(dataframe.shape, (0, 0))
self.assertEqual(dataframe.columns.names, NAMES)
self.assertEqual(dataframe.columns.name, NAME)
self.assertIsNone(dataframe.index.name)
self.assertIsInstance(dataframe.index, pandas.DatetimeIndex)
def test_empty_table_smart_labels(self):
NAME = 'resource_type'
NAMES = [NAME]
dataframe = self._call_fut([])
self.assertEqual(dataframe.shape, (0, 0))
self.assertEqual(dataframe.columns.names, NAMES)
self.assertEqual(dataframe.columns.name, NAME)
self.assertIsNone(dataframe.index.name)
self.assertIsInstance(dataframe.index, pandas.DatetimeIndex)
class Test__sorted_resource_labels(unittest.TestCase):
def _call_fut(self, labels):
from google.cloud.monitoring._dataframe import _sorted_resource_labels
return _sorted_resource_labels(labels)
def test_empty(self):
self.assertEqual(self._call_fut([]), [])
def test_sorted(self):
from google.cloud.monitoring._dataframe import TOP_RESOURCE_LABELS
EXPECTED = TOP_RESOURCE_LABELS + ('other-1', 'other-2')
self.assertSequenceEqual(self._call_fut(EXPECTED), EXPECTED)
def test_reversed(self):
from google.cloud.monitoring._dataframe import TOP_RESOURCE_LABELS
EXPECTED = TOP_RESOURCE_LABELS + ('other-1', 'other-2')
INPUT = list(reversed(EXPECTED))
self.assertSequenceEqual(self._call_fut(INPUT), EXPECTED)
| apache-2.0 |
fhqgfss/MoHa | moha/modelsystem/lattice.py | 1 | 7577 | '''Lattice for Model Hamiltonian'''
import copy
import numpy as np
import matplotlib.pyplot as plt
site_label_dir = {'0':'A','1':'B','2':'C','3':'D','4':'E'}
site_color_dir = {'0':'b','1':'r','2':'g'}
bond_label_dir = {'0':'a','1':'b','2':'c','3':'d','4':'e'}
bond_color_dir = {'0':'b','1':'r','2':'g'}
class LatticeSite(object):
"""Child class of LatticeSite"""
def __init__(self,coordinate,label=None,color=None,markersize=80,marker='o'):
""" Initialize of lattice site instance
Parameters
----------
coordinate : list
coordinate of the site
label : str
label of the site
color : str
color the the site in matplotlib
marker : str
type of marker in matplotlib
markersize : int
size of the marker in matplotlib
"""
self.coordinate = np.array(coordinate)
self.label = label
self.color = color
self.marker = marker
self.markersize = markersize
class LatticeBond(object):
"""Bond of Lattice"""
def __init__(self,cell1,site1,cell2,site2,color=None,label=None,linewidth=None,linetype=None):
""" Initialize of lattice bond instance
Parameters
----------
cell1 : list
integer coordinate of first cell
site1 : int
index of first site
cell2 : list
integer coordinate of second cell
site2 : int
index of second site
label : str
label of the bond
color : str
color of the line in matplotlib
linewidth : int
integer to represent the width of line in matplotlib
linetype : str
str to represent the type of line in matplotlib
"""
self.cell1 = np.array(cell1)
self.site1 = site1
self.cell2 = np.array(cell2)
self.site2 = site2
        # end-point coordinates; filled in later by Lattice.build_bonds()
        self.coordinate1 = None
        self.coordinate2 = None
self.color = color
self.label = label
self.linewidth = linewidth
self.linetype = linetype
class Cell(object):
"""Primitive cell of the lattice"""
def __init__(self,dimension,a1,a2,a3):
""" Initialize of cell instance
Parameters
----------
dimension : int
dimension of the cell, which can be 1, 2 or 3
a1 : list
first component of primitive vector
a2 : list
second component of primitive vector
a3 : list
third component of primitive vector
"""
self.dimension = dimension
self.a1 = np.array(a1)
self.a2 = np.array(a2)
self.a3 = np.array(a3)
self.sites = []
self.bonds = []
self.Nsites = 0
self.Nbonds = 0
def add_site(self,site):
"""Add a lattice site instance to the cell
Parameters
----------
        site : LatticeSite
lattice site class
"""
if site.label==None:
site.label = site_label_dir[str(self.Nsites)]
if site.color==None:
site.color = site_color_dir[str(self.Nsites)]
self.sites.append(site)
self.Nsites +=1
def add_bond(self,bond):
"""Add a lattice bond instance to the cell
Parameters
----------
bond : LatticeBond
lattice bond class
"""
if bond.label==None:
bond.label = bond_label_dir[str(self.Nbonds)]
if bond.color==None:
bond.color = 'black'
self.bonds.append(bond)
self.Nbonds +=1
class Lattice(object):
"""Class for lattice"""
def __init__(self, cell, shape):
"""Initialize of lattice instance
Parameters
----------
cell : Cell
Cell class
shape : list
a list of three integer
dimension : int
dimension of the lattice, which can be one two or three
Nsites : int
number of sites in the lattice
sites : numpy array
numpy array for element of site object
bonds : list
list of bond object
"""
self.cell = cell
self.shape = shape
self.dimension = cell.dimension
self.Nsites = np.prod(shape) * self.cell.Nsites
self.sites = np.zeros(self.shape+[self.cell.Nsites],dtype='object')
self.bonds = []
self.build_sites()
self.build_bonds()
def build_sites(self):
"""Add all sites to the lattice."""
for i in range(self.shape[0]):
for j in range(self.shape[1]):
for k in range(self.shape[2]):
for s,site in enumerate(self.cell.sites):
newsite = copy.deepcopy(site)
coordinate = self.cell.a1*i+\
self.cell.a2*j+\
self.cell.a3*k
newsite.coordinate += coordinate
self.sites[i,j,k,s] = newsite
def build_bonds(self):
"""Add all bonds to the lattice."""
shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])
zeros = np.array([0,0,0])
for i in range(self.shape[0]):
for j in range(self.shape[1]):
for k in range(self.shape[2]):
for b,bond in enumerate(self.cell.bonds):
newbond = copy.deepcopy(bond)
newbond.cell1 += [i,j,k]
newbond.cell2 += [i,j,k]
#ToDo make a function to shorten those lines
if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):
newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate
newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate
self.bonds.append(newbond)
def plotsite(self):
"""plot sites with matplotlib."""
if self.dimension==1 or self.dimension==2:
for site in self.sites.flatten():
plt.scatter(site.coordinate[0],site.coordinate[1],marker=site.marker,s=site.markersize,c=site.color)
elif self.dimension==3:
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
for site in self.sites.flatten():
ax.scatter(site.coordinate[0],site.coordinate[1],site.coordinate[2],marker=site.marker,s=site.markersize,c=site.color)
def plotbond(self):
"""plot bonds with matplotlib."""
if self.dimension==1 or self.dimension==2:
for bond in self.bonds:
x = [bond.coordinate1[0],bond.coordinate2[0]]
y = [bond.coordinate1[1],bond.coordinate2[1]]
plt.plot(x,y,c=bond.color)
elif self.dimension==3:
for bond in self.bonds:
x = [bond.coordinate1[0],bond.coordinate2[0]]
y = [bond.coordinate1[1],bond.coordinate2[1]]
z = [bond.coordinate1[2],bond.coordinate2[2]]
plt.plot(x,y,z,c=bond.color)
def plot(self):
"""plot lattice and show it with matplotlib."""
self.plotsite()
self.plotbond()
plt.show()
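# --- Added usage sketch (not part of the original module) ---
# Builds a ten-cell one-dimensional chain with a single site per cell and
# nearest-neighbour bonds, then draws it. The vector and shape values are
# illustrative; an interactive matplotlib backend is needed to display it.
if __name__ == '__main__':
    chain_cell = Cell(dimension=1, a1=[1.0, 0.0, 0.0],
                      a2=[0.0, 1.0, 0.0], a3=[0.0, 0.0, 1.0])
    chain_cell.add_site(LatticeSite([0.0, 0.0, 0.0]))
    chain_cell.add_bond(LatticeBond([0, 0, 0], 0, [1, 0, 0], 0))
    chain_lattice = Lattice(chain_cell, [10, 1, 1])
    chain_lattice.plot()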
| mit |
spallavolu/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
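    # with the default slice_ of (slice(70, 195), slice(78, 172)) and
    # resize=0.5 this yields h = int(0.5 * 125) = 62 and w = int(0.5 * 94) = 47,
    # i.e. the 62 x 47 crops referred to in the fetch_* docstrings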
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
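    # each pair spec has either 3 fields ("name  i  j") for two pictures of the
    # same person (target 1) or 4 fields ("name1  i  name2  j") for pictures of
    # two different people (target 0); picture indices are 1-based in the file,
    # hence the "- 1" adjustments below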
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
dkushner/zipline | tests/modelling/base.py | 6 | 3402 | """
Base class for FFC unit tests.
"""
from functools import wraps
from unittest import TestCase
from numpy import arange, prod
from numpy.random import randn, seed as random_seed
from pandas import date_range, Int64Index, DataFrame
from six import iteritems
from zipline.assets import AssetFinder
from zipline.modelling.engine import SimpleFFCEngine
from zipline.modelling.graph import TermGraph
from zipline.utils.test_utils import make_simple_asset_info, ExplodingObject
from zipline.utils.tradingcalendar import trading_day
def with_defaults(**default_funcs):
"""
Decorator for providing dynamic default values for a method.
Usages:
@with_defaults(foo=lambda self: self.x + self.y)
def func(self, foo):
...
If a value is passed for `foo`, it will be used. Otherwise the function
supplied to `with_defaults` will be called with `self` as an argument.
"""
def decorator(f):
@wraps(f)
def method(self, *args, **kwargs):
for name, func in iteritems(default_funcs):
if name not in kwargs:
kwargs[name] = func(self)
return f(self, *args, **kwargs)
return method
return decorator
with_default_shape = with_defaults(shape=lambda self: self.default_shape)
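# Example (sketch): a method decorated with `with_default_shape` may omit its
# `shape` argument and fall back to `self.default_shape`; `ones_data` below is
# a hypothetical illustration, not part of the test suite:
#
#     @with_default_shape
#     def ones_data(self, shape):
#         return np.ones(shape)
#
#     # self.ones_data() is equivalent to self.ones_data(self.default_shape)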
class BaseFFCTestCase(TestCase):
def setUp(self):
self.__calendar = date_range('2014', '2015', freq=trading_day)
self.__assets = assets = Int64Index(arange(1, 20))
self.__finder = AssetFinder(
make_simple_asset_info(
assets,
self.__calendar[0],
self.__calendar[-1],
),
db_path=':memory:',
create_table=True,
)
self.__mask = self.__finder.lifetimes(self.__calendar[-10:])
@property
def default_shape(self):
"""Default shape for methods that build test data."""
return self.__mask.shape
def run_terms(self, terms, initial_workspace, mask=None):
"""
Compute the given terms, seeding the workspace of our FFCEngine with
`initial_workspace`.
Parameters
----------
        terms : dict
            Mapping from termname -> term object.
        initial_workspace : dict
            Mapping from term -> precomputed array used to seed the engine
            before computation.
        mask : DataFrame, optional
            Boolean (dates x assets) lifetimes mask; defaults to the mask
            built in setUp.
Returns
-------
results : dict
Mapping from termname -> computed result.
"""
engine = SimpleFFCEngine(
ExplodingObject(),
self.__calendar,
self.__finder,
)
mask = mask if mask is not None else self.__mask
return engine.compute_chunk(TermGraph(terms), mask, initial_workspace)
def build_mask(self, array):
ndates, nassets = array.shape
return DataFrame(
array,
# Use the **last** N dates rather than the first N so that we have
# space for lookbacks.
index=self.__calendar[-ndates:],
columns=self.__assets[:nassets],
dtype=bool,
)
@with_default_shape
def arange_data(self, shape, dtype=float):
"""
Build a block of testing data from numpy.arange.
"""
return arange(prod(shape), dtype=dtype).reshape(shape)
@with_default_shape
def randn_data(self, seed, shape):
"""
Build a block of testing data from numpy.random.randn.
"""
random_seed(seed)
return randn(*shape)
| apache-2.0 |
ctk3b/mdtraj | mdtraj/core/topology.py | 1 | 48926 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Kyle A. Beauchamp, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import itertools
import numpy as np
import os
import xml.etree.ElementTree as etree
from mdtraj.core import element as elem
from mdtraj.core.residue_names import (_PROTEIN_RESIDUES, _WATER_RESIDUES,
_AMINO_ACID_CODES)
from mdtraj.core.selection import parse_selection
from mdtraj.utils import ilen, import_, ensure_type
from mdtraj.utils.six import string_types
##############################################################################
# Utilities
##############################################################################
def _topology_from_subset(topology, atom_indices):
"""Create a new topology that only contains the supplied indices
Note
----
This really should be a copy constructor (class method) on Topology.
    It used to work on OpenMM topologies, but we've diverged to the point
    where that no longer works.
Parameters
----------
topology : mdtraj.Topology
The base topology
atom_indices : array_like, dtype=int
The indices of the atoms to keep
"""
newTopology = Topology()
old_atom_to_new_atom = {}
for chain in topology._chains:
newChain = newTopology.add_chain()
for residue in chain._residues:
resSeq = getattr(residue, 'resSeq', None) or residue.index
newResidue = newTopology.add_residue(residue.name, newChain,
resSeq, residue.segment_id)
for atom in residue._atoms:
if atom.index in atom_indices:
try: # OpenMM Topology objects don't have serial attributes, so we have to check first.
serial = atom.serial
except AttributeError:
serial = None
newAtom = newTopology.add_atom(atom.name, atom.element,
newResidue, serial=serial)
old_atom_to_new_atom[atom] = newAtom
bondsiter = topology.bonds
if not hasattr(bondsiter, '__iter__'):
bondsiter = bondsiter()
for atom1, atom2 in bondsiter:
try:
newTopology.add_bond(old_atom_to_new_atom[atom1],
old_atom_to_new_atom[atom2])
except KeyError:
pass
# we only put bonds into the new topology if both of their partners
# were indexed and thus HAVE a new atom
# Delete empty residues
newTopology._residues = [r for r in newTopology._residues if len(r._atoms) > 0]
for chain in newTopology._chains:
chain._residues = [r for r in chain._residues if len(r._atoms) > 0]
# Delete empty chains
newTopology._chains = [c for c in newTopology._chains
if len(c._residues) > 0]
# Re-set the numAtoms and numResidues
newTopology._numAtoms = ilen(newTopology.atoms)
newTopology._numResidues = ilen(newTopology.residues)
return newTopology
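# Example (sketch): given an existing mdtraj.Topology `top` (name illustrative),
# a sub-topology holding only its first ten atoms and the bonds between them:
#
#     sub = _topology_from_subset(top, set(range(10)))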
##############################################################################
# Classes
##############################################################################
class Topology(object):
"""Topology stores the topological information about a system.
The structure of a Topology object is similar to that of a PDB file.
It consists of a set of Chains (often but not always corresponding to
polymer chains). Each Chain contains a set of Residues, and each Residue
contains a set of Atoms. In addition, the Topology stores a list of which
atom pairs are bonded to each other.
Atom and residue names should follow the PDB 3.0 nomenclature for all
molecules for which one exists.
Attributes
----------
chains : generator
Iterator over all Chains in the Topology.
residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
Examples
--------
>>> topology = md.load('example.pdb').topology
>>> print(topology)
<mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90>
>>> table, bonds = topology.to_dataframe()
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 1 CYS 0
1 1 CH3 C 1 CYS 0
2 2 H2 H 1 CYS 0
3 3 H3 H 1 CYS 0
4 4 C C 1 CYS 0
>>> # rename residue "CYS" to "CYSS"
>>> table[table['residue'] == 'CYS']['residue'] = 'CYSS'
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 1 CYSS 0
1 1 CH3 C 1 CYSS 0
2 2 H2 H 1 CYSS 0
3 3 H3 H 1 CYSS 0
4 4 C C 1 CYSS 0
>>> t2 = md.Topology.from_dataframe(table, bonds)
"""
_standardBonds = {}
def __init__(self):
"""Create a new Topology object"""
self._chains = []
self._numResidues = 0
self._numAtoms = 0
self._bonds = []
self._atoms = []
self._residues = []
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def _string_summary_basic(self):
return ("mdtraj.Topology with %d chains, %d residues, "
"%d atoms, %d bonds" % (self.n_chains, self.n_residues,
self.n_atoms, len(self._bonds)))
def copy(self):
"""Return a copy of the topology
Returns
-------
out : Topology
A copy of this topology
"""
out = Topology()
for chain in self.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq, residue.segment_id)
for atom in residue.atoms:
out.add_atom(atom.name, atom.element, r,
serial=atom.serial)
for a1, a2 in self.bonds:
out.add_bond(a1, a2)
return out
def __copy__(self, *args):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
def __hash__(self):
hash_value = hash(tuple(self._chains))
hash_value ^= hash(tuple(self._atoms))
hash_value ^= hash(tuple(self._bonds))
hash_value ^= hash(tuple(self._residues))
return hash_value
def join(self, other):
"""Join two topologies together
Parameters
----------
other : Topology
Another topology object
Returns
-------
out : Topology
A joint topology, with all of the atoms/residues/chains/bonds
in each of the individual topologies
"""
if not isinstance(other, Topology):
raise ValueError('other must be an instance of Topology to join')
out = self.copy()
atom_mapping = {}
for chain in other.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq, residue.segment_id)
for atom in residue.atoms:
a = out.add_atom(atom.name, atom.element, r,
serial=atom.serial)
atom_mapping[atom] = a
for a1, a2 in other.bonds:
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_fasta(self, chain=None):
"""Convert this topology into FASTA string
Parameters
----------
chain : Integer, optional, default=None
If specified, will return the FASTA string for this chain in the
Topology.
Returns
-------
fasta : String or list of Strings
A FASTA string for each chain specified.
"""
fasta = lambda c: "".join([res.code for res in c.residues
if res.is_protein and res.code is not None])
if chain is not None:
if not isinstance(chain, int):
raise ValueError('chain must be an Integer.')
return fasta(self._chains[chain])
else:
return [fasta(c) for c in self._chains]
def to_openmm(self, traj=None):
"""Convert this topology into OpenMM topology
Parameters
----------
traj : MDTraj.Trajectory, optional, default=None
If specified, use the first frame from this trajectory to
set the unitcell information in the openmm topology.
Returns
-------
topology : simtk.openmm.app.Topology
This topology, as an OpenMM topology
"""
app = import_('simtk.openmm.app')
mm = import_('simtk.openmm')
u = import_('simtk.unit')
out = app.Topology()
atom_mapping = {}
for chain in self.chains:
c = out.addChain()
for residue in chain.residues:
r = out.addResidue(residue.name, c)
for atom in residue.atoms:
if atom.element is elem.virtual:
element = None
else:
element = app.Element.getBySymbol(atom.element.symbol)
a = out.addAtom(atom.name, element, r)
atom_mapping[atom] = a
for a1, a2 in self.bonds:
out.addBond(atom_mapping[a1], atom_mapping[a2])
if traj is not None:
angles = traj.unitcell_angles[0]
if np.linalg.norm(angles - 90.0) > 1E-4:
raise(ValueError("Unitcell angles must be 90.0 to use "
"in OpenMM topology."))
box_vectors = mm.Vec3(*traj.unitcell_lengths[0]) * u.nanometer
out.setUnitCellDimensions(box_vectors)
return out
@classmethod
def from_openmm(cls, value):
"""Create a mdtraj topology from an OpenMM topology
Parameters
----------
value : simtk.openmm.app.Topology
An OpenMM topology that you wish to convert to a
mdtraj topology.
"""
app = import_('simtk.openmm.app')
if not isinstance(value, app.Topology):
raise TypeError('value must be an OpenMM Topology. '
'You supplied a %s' % type(value))
out = cls()
atom_mapping = {}
for chain in value.chains():
c = out.add_chain()
for residue in chain.residues():
r = out.add_residue(residue.name, c)
for atom in residue.atoms():
if atom.element is None:
element = elem.virtual
else:
element = elem.get_by_symbol(atom.element.symbol)
a = out.add_atom(atom.name, element, r)
atom_mapping[atom] = a
for a1, a2 in value.bonds():
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_dataframe(self):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
pd = import_('pandas')
data = [(atom.serial, atom.name, atom.element.symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index,atom.segment_id) for atom in self.atoms]
atoms = pd.DataFrame(data, columns=["serial", "name", "element",
"resSeq", "resName", "chainID","segmentID"])
bonds = np.array([(a.index, b.index) for (a, b) in self.bonds])
return atoms, bonds
@classmethod
def from_dataframe(cls, atoms, bonds=None):
"""Create a mdtraj topology from a pandas data frame
Parameters
----------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
and optionally "segmentID", following the same conventions
as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
            of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
pd = import_('pandas')
if bonds is None:
bonds = np.zeros((0, 2))
for col in ["name", "element", "resSeq",
"resName", "chainID", "serial"]:
if col not in atoms.columns:
raise ValueError('dataframe must have column %s' % col)
if "segmentID" not in atoms.columns:
atoms["segmentID"] = ""
out = cls()
if not isinstance(atoms, pd.DataFrame):
raise TypeError('atoms must be an instance of pandas.DataFrame. '
'You supplied a %s' % type(atoms))
if not isinstance(bonds, np.ndarray):
raise TypeError('bonds must be an instance of numpy.ndarray. '
'You supplied a %s' % type(bonds))
if not np.all(np.arange(len(atoms)) == atoms.index):
raise ValueError('atoms must be uniquely numbered '
'starting from zero.')
out._atoms = [None for i in range(len(atoms))]
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
segids = residue_atoms['segmentID']
segment_id = np.array(segids)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d '
'do not share the same residue name' % ri)
r = out.add_residue(residue_name, c, ri,segment_id)
for atom_index, atom in residue_atoms.iterrows():
atom_index = int(atom_index) # Fixes bizarre hashing issue on Py3K. See #545
a = Atom(atom['name'], elem.get_by_symbol(atom['element']),
atom_index, r, serial=atom['serial'])
out._atoms[atom_index] = a
r._atoms.append(a)
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def to_bondgraph(self):
"""Create a NetworkX graph from the atoms and bonds in this topology
Returns
-------
g : nx.Graph
A graph whose nodes are the Atoms in this topology, and
whose edges are the bonds
See Also
--------
atoms
bonds
Notes
-----
This method requires the NetworkX python package.
"""
nx = import_('networkx')
g = nx.Graph()
g.add_nodes_from(self.atoms)
g.add_edges_from(self.bonds)
return g
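    # Example (sketch, requires networkx): the bond graph can be used to split
    # a topology into covalently connected molecules, assuming `top` is a
    # Topology instance:
    #
    #     import networkx as nx
    #     molecules = list(nx.connected_components(top.to_bondgraph()))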
def __eq__(self, other):
"""Are two topologies equal?
Parameters
----------
other : object
The object to compare to
Returns
-------
equality : bool
Are the two topologies identical?
"""
if not isinstance(other, Topology):
return False
if self is other:
return True
if len(self._chains) != len(other._chains):
return False
for c1, c2 in zip(self.chains, other.chains):
if c1.index != c2.index:
return False
if len(c1._residues) != len(c2._residues):
return False
for r1, r2 in zip(c1.residues, c2.residues):
                if (r1.index != r2.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq):
return False
if len(r1._atoms) != len(r2._atoms):
return False
for a1, a2 in zip(r1.atoms, r2.atoms):
if (a1.index != a2.index) or (a1.name != a2.name):
return False
if a1.element is not a2.element:
return False
# for attr in ['atomic_number', 'name', 'symbol']:
# if getattr(a1.element, attr) != getattr(a2.element, attr):
# return False
if len(self._bonds) != len(other._bonds):
return False
# the bond ordering is somewhat ambiguous, so try and fix it for comparison
self_sorted_bonds = sorted([(a1.index, b1.index)
for (a1, b1) in self.bonds])
other_sorted_bonds = sorted([(a2.index, b2.index)
for (a2, b2) in other.bonds])
for i in range(len(self._bonds)):
(a1, b1) = self_sorted_bonds[i]
(a2, b2) = other_sorted_bonds[i]
if (a1 != a2) or (b1 != b2):
return False
return True
def add_chain(self):
"""Create a new Chain and add it to the Topology.
Returns
-------
chain : mdtraj.topology.Chain
the newly created Chain
"""
chain = Chain(len(self._chains), self)
self._chains.append(chain)
return chain
def add_residue(self, name, chain, resSeq=None, segment_id=""):
"""Create a new Residue and add it to the Topology.
Parameters
----------
name : str
The name of the residue to add
chain : mdtraj.topology.Chain
The Chain to add it to
resSeq : int, optional
Residue sequence number, such as from a PDB record. These sequence
numbers are arbitrary, and do not necessarily start at 0 (or 1).
If not supplied, the resSeq attribute will be set to the
residue's sequential (0 based) index.
segment_id : str, optional
A label for the segment to which this residue belongs
Returns
-------
residue : mdtraj.topology.Residue
The newly created Residue
"""
if resSeq is None:
resSeq = self._numResidues
residue = Residue(name, self._numResidues, chain, resSeq, segment_id)
self._residues.append(residue)
self._numResidues += 1
chain._residues.append(residue)
return residue
def add_atom(self, name, element, residue, serial=None):
"""Create a new Atom and add it to the Topology.
Parameters
----------
name : str
The name of the atom to add
element : mdtraj.element.Element
The element of the atom to add
residue : mdtraj.topology.Residue
The Residue to add it to
serial : int
Serial number associated with the atom.
Returns
-------
atom : mdtraj.topology.Atom
the newly created Atom
"""
if element is None:
element = elem.virtual
atom = Atom(name, element, self._numAtoms, residue, serial=serial)
self._atoms.append(atom)
self._numAtoms += 1
residue._atoms.append(atom)
return atom
def add_bond(self, atom1, atom2):
"""Create a new bond and add it to the Topology.
Parameters
----------
atom1 : mdtraj.topology.Atom
The first Atom connected by the bond
atom2 : mdtraj.topology.Atom
The second Atom connected by the bond
"""
if atom1.index < atom2.index:
self._bonds.append((atom1, atom2))
else:
self._bonds.append((atom2, atom1))
def chain(self, index):
"""Get a specific chain by index. These indices
start from zero.
Parameters
----------
index : int
The index of the chain to select.
Returns
-------
chain : Chain
The `index`-th chain in the topology.
"""
return self._chains[index]
@property
def chains(self):
"""Iterator over all Chains in the Topology.
Returns
-------
chainiter : listiterator
Iterator over all Chains in the Topology.
"""
return iter(self._chains)
@property
def n_chains(self):
"""Get the number of chains in the Topology"""
return len(self._chains)
def residue(self, index):
"""Get a specific residue by index. These indices
start from zero.
Parameters
----------
index : int
The index of the residue to select.
Returns
-------
residue : Residue
The `index`-th residue in the topology.
"""
return self._residues[index]
@property
def residues(self):
"""Iterator over all Residues in the Topology.
Returns
-------
residueiter : generator
Iterator over all Residues in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
yield residue
@property
def n_residues(self):
"""Get the number of residues in the Topology. """
return len(self._residues)
def atom(self, index):
"""Get a specific atom by index. These indices
start from zero.
Parameters
----------
index : int
The index of the atom to select.
Returns
-------
atom : Atom
The `index`-th atom in the topology.
"""
return self._atoms[index]
@property
def atoms(self):
"""Iterator over all Atoms in the Topology.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
for atom in residue._atoms:
yield atom
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Topology with a specified name
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in topology.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
@property
def n_atoms(self):
"""Get the number of atoms in the Topology"""
return len(self._atoms)
@property
def bonds(self):
"""Iterator over all bonds (each represented as a tuple of two Atoms) in the Topology.
Returns
-------
atomiter : generator
            Iterator over all tuples of Atoms in the Topology involved in a bond.
"""
return iter(self._bonds)
@property
def n_bonds(self):
"""Get the number of bonds in the Topology"""
return len(self._bonds)
def create_standard_bonds(self):
"""Create bonds based on the atom and residue names for all standard residue types.
"""
if len(Topology._standardBonds) == 0:
            # Load the standard bond definitions.
tree = etree.parse(os.path.join(os.path.dirname(__file__), '..',
'formats', 'pdb', 'data', 'residues.xml'))
for residue in tree.getroot().findall('Residue'):
bonds = []
Topology._standardBonds[residue.attrib['name']] = bonds
for bond in residue.findall('Bond'):
bonds.append((bond.attrib['from'], bond.attrib['to']))
for chain in self._chains:
# First build a map of atom names to atoms.
atomMaps = []
for residue in chain._residues:
atomMap = {}
atomMaps.append(atomMap)
for atom in residue._atoms:
atomMap[atom.name] = atom
# Loop over residues and construct bonds.
for i in range(len(chain._residues)):
name = chain._residues[i].name
if name in Topology._standardBonds:
for bond in Topology._standardBonds[name]:
if bond[0].startswith('-') and i > 0:
fromResidue = i-1
fromAtom = bond[0][1:]
elif (bond[0].startswith('+')
and i < len(chain._residues)):
fromResidue = i+1
fromAtom = bond[0][1:]
else:
fromResidue = i
fromAtom = bond[0]
if bond[1].startswith('-') and i > 0:
toResidue = i-1
toAtom = bond[1][1:]
elif (bond[1].startswith('+')
and i < len(chain._residues)):
toResidue = i+1
toAtom = bond[1][1:]
else:
toResidue = i
toAtom = bond[1]
if (fromAtom in atomMaps[fromResidue]
and toAtom in atomMaps[toResidue]):
self.add_bond(atomMaps[fromResidue][fromAtom],
atomMaps[toResidue][toAtom])
def create_disulfide_bonds(self, positions):
"""Identify disulfide bonds based on proximity and add them to the Topology.
Parameters
----------
positions : list
The list of atomic positions based on which to identify bonded atoms
"""
def isCyx(res):
names = [atom.name for atom in res._atoms]
return 'SG' in names and 'HG' not in names
cyx = [res for res in self.residues
if res.name == 'CYS' and isCyx(res)]
atomNames = [[atom.name for atom in res._atoms] for res in cyx]
for i in range(len(cyx)):
sg1 = cyx[i]._atoms[atomNames[i].index('SG')]
pos1 = positions[sg1.index]
for j in range(i):
sg2 = cyx[j]._atoms[atomNames[j].index('SG')]
pos2 = positions[sg2.index]
delta = [x-y for (x, y) in zip(pos1, pos2)]
distance = np.sqrt(
delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
if distance < 0.3: # this is supposed to be nm. I think we're good
self.add_bond(sg1, sg2)
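    # Usage sketch (illustrative): positions are expected in nanometers, to
    # match the 0.3 nm SG-SG cutoff above. Assuming `traj` is an existing
    # mdtraj.Trajectory, one frame's coordinates could be passed as:
    #
    #   top.create_disulfide_bonds(traj.xyz[0])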
def subset(self, atom_indices):
"""Create a new Topology from a subset of the atoms in an existing topology.
Notes
-----
The existing topology will not be altered.
Parameters
----------
atom_indices : array_like
            A list of the indices corresponding to the atoms that you'd
like to retain.
"""
return _topology_from_subset(self, atom_indices)
def select_expression(self, selection_string):
"""Translate a atom selection expression into a pure python expression.
Parameters
----------
selection_string : str
An expression in the MDTraj atom selection DSL
Examples
--------
>>> topology.select_expression('name O and water')
"[atom.index for atom in topology.atoms if ((atom.name == 'O') and atom.residue.is_water)]")
Returns
-------
python_string : str
A string containing a pure python expression, equivalent to the
selection expression.
"""
condition = parse_selection(selection_string).source
fmt_string = "[atom.index for atom in topology.atoms if {condition}]"
return fmt_string.format(condition=condition)
def select(self, selection_string):
"""Execute a selection against the topology
Parameters
----------
selection_string : str
An expression in the MDTraj atom selection DSL
Examples
--------
>>> topology.select('name O and water')
array([1, 3, 5, 10, ...])
Returns
-------
indices : np.ndarray, dtype=int, ndim=1
Array of the indices of the atoms matching the selection expression.
See Also
--------
select_expression, mdtraj.core.selection.parse_selection
"""
filter_func = parse_selection(selection_string).expr
indices = np.array([a.index for a in self.atoms if filter_func(a)])
return indices
def select_atom_indices(self, selection='minimal'):
"""Get the indices of biologically-relevant groups by name.
Parameters
----------
selection : {'all', 'alpha', 'minimal', 'heavy', 'water'}
What types of atoms to select.
``all``
All atoms
``alpha``
Protein residue alpha carbons
``minimal``
Keep the atoms in protein residues with names in {CA, CB, C, N, O}
``heavy``
All non-hydrogen protein atoms.
``water``
Water oxygen atoms
Returns
----------
indices : np.ndarray (N,)
An array of the indices of the selected atoms.
"""
selection = selection.lower()
options = ['all', 'alpha', 'minimal', 'heavy', 'water']
if selection == 'all':
atom_indices = np.arange(self.n_atoms)
elif selection == 'alpha':
atom_indices = [a.index for a in self.atoms if
a.name == 'CA'
and a.residue.is_protein]
elif selection == 'minimal':
atom_indices = [a.index for a in self.atoms if
a.name in ['CA', 'CB', 'C', 'N', 'O']
and a.residue.is_protein]
elif selection == 'heavy':
atom_indices = [a.index for a in self.atoms if
a.element != elem.hydrogen
and a.residue.is_protein]
elif selection == 'water':
atom_indices = [a.index for a in self.atoms if
a.name in ['O', 'OW']
and a.residue.is_water]
else:
raise ValueError(
'%s is not a valid option. Selection must be one of %s' % (
selection, ', '.join(options)))
indices = np.array(atom_indices)
return indices
def select_pairs(self, selection1=None, selection2=None):
"""Generate unique pairs of atom indices.
        If a selection is a string, it will be resolved using the atom selection
DSL, otherwise it is expected to be an array of atom indices.
Parameters
----------
selection1 : str or array-like, shape=(n_indices, ), dtype=int
A selection for `select()` or an array of atom indices.
selection2 : str or array-like, shape=(n_indices, ), dtype=int
A selection for `select()` or an array of atom indices.
Returns
-------
pairs : array-like, shape=(n_pairs, 2), dtype=int
Each row gives the indices of two atoms.
"""
# Resolve selections using the atom selection DSL...
if isinstance(selection1, string_types):
a_indices = self.select(selection1)
else: # ...or use a provided array of indices.
a_indices = ensure_type(selection1, dtype=np.int32, ndim=1,
name='a_indices', warn_on_cast=False)
if isinstance(selection2, string_types):
b_indices = self.select(selection2)
else:
b_indices = ensure_type(selection2, dtype=np.int32, ndim=1,
name='b_indices', warn_on_cast=False)
a_indices.sort()
b_indices.sort()
# Create unique pairs from the indices.
# In the cases where a_indices and b_indices are identical or mutually
# exclusive, we can utilize a more efficient and memory friendly
# approach by removing the intermediate set creation required in
# the general case.
if np.array_equal(a_indices, b_indices):
pairs = self._unique_pairs_equal(a_indices)
elif len(np.intersect1d(a_indices, b_indices)) == 0:
pairs = self._unique_pairs_mutually_exclusive(a_indices, b_indices)
else:
pairs = self._unique_pairs(a_indices, b_indices)
return pairs
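    # Usage sketch (illustrative): selections may be DSL strings or index
    # arrays, and the two forms can be mixed freely.
    #
    #   ca_cb = top.select_pairs('name CA', 'name CB')
    #   mixed = top.select_pairs(np.array([0, 1, 2], dtype=np.int32),
    #                            'name O and water')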
@classmethod
def _unique_pairs(cls, a_indices, b_indices):
return np.array(list(set(
(a, b) if a > b else (b, a)
for a, b in itertools.product(a_indices, b_indices)
if a != b)), dtype=np.int32)
@classmethod
def _unique_pairs_mutually_exclusive(cls, a_indices, b_indices):
pairs = np.fromiter(itertools.chain.from_iterable(
itertools.product(a_indices, b_indices)),
dtype=np.int32, count=len(a_indices) * len(b_indices) * 2)
return np.vstack((pairs[::2], pairs[1::2])).T
@classmethod
def _unique_pairs_equal(cls, a_indices):
pairs = np.fromiter(itertools.chain.from_iterable(
itertools.combinations(a_indices, 2)),
dtype=np.int32, count=len(a_indices) * (len(a_indices) - 1))
return np.vstack((pairs[::2], pairs[1::2])).T
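    # Worked trace (added for clarity): both helpers above de-interleave a flat
    # buffer back into an (n_pairs, 2) array. For a_indices=[0, 1] and
    # b_indices=[5, 6], itertools.product yields (0, 5), (0, 6), (1, 5), (1, 6);
    # np.fromiter flattens this to [0, 5, 0, 6, 1, 5, 1, 6], and
    # np.vstack((pairs[::2], pairs[1::2])).T rebuilds
    # [[0, 5], [0, 6], [1, 5], [1, 6]] without an intermediate set.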
def find_molecules(self):
"""Identify molecules based on bonds.
A molecule is defined as a set of atoms that are connected to each other by bonds.
This method uses the list of bonds to divide up the Topology's atoms into molecules.
Returns
-------
molecules : list of sets
Each entry represents one molecule, and is the set of all Atoms in that molecule
"""
if len(self._bonds) == 0 and any(res.n_atoms > 1 for res in self._residues):
raise ValueError('Cannot identify molecules because this Topology does not include bonds')
# Make a list of every other atom to which each atom is connected.
num_atoms = self.n_atoms
atom_bonds = [[] for i in range(num_atoms)]
for atom1, atom2 in self.bonds:
atom_bonds[atom1.index].append(atom2.index)
atom_bonds[atom2.index].append(atom1.index)
# This is essentially a recursive algorithm, but it is reformulated as a loop to avoid
# stack overflows. It selects an atom, marks it as a new molecule, then recursively
# marks every atom bonded to it as also being in that molecule.
atom_molecule = [-1]*num_atoms
num_molecules = 0
for i in range(num_atoms):
if atom_molecule[i] == -1:
# Start a new molecule.
atom_stack = [i]
neighbor_stack = [0]
molecule = num_molecules
num_molecules += 1
# Recursively tag all the bonded atoms.
while len(atom_stack) > 0:
atom = atom_stack[-1]
atom_molecule[atom] = molecule
while neighbor_stack[-1] < len(atom_bonds[atom]) and atom_molecule[atom_bonds[atom][neighbor_stack[-1]]] != -1:
neighbor_stack[-1] += 1
if neighbor_stack[-1] < len(atom_bonds[atom]):
atom_stack.append(atom_bonds[atom][neighbor_stack[-1]])
neighbor_stack.append(0)
else:
del atom_stack[-1]
del neighbor_stack[-1]
# Build the final output.
molecules = [set() for i in range(num_molecules)]
for atom in self.atoms:
molecules[atom_molecule[atom.index]].add(atom)
return molecules
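    # Usage sketch (illustrative): grouping atoms into molecules by bond
    # connectivity, e.g. to count solvent molecules in a solvated topology.
    #
    #   molecules = top.find_molecules()
    #   n_waters = sum(1 for mol in molecules
    #                  if all(atom.residue.is_water for atom in mol))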
class Chain(object):
"""A Chain object represents a chain within a Topology.
Attributes
----------
index : int
The index of the Chain within its Topology
topology : mdtraj.Topology
The Topology this Chain belongs to
residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
"""
def __init__(self, index, topology):
"""Construct a new Chain. You should call add_chain() on the Topology instead of calling this directly."""
# The index of the Chain within its Topology
self.index = index
# The Topology this Chain belongs to
self.topology = topology
self._residues = []
@property
def residues(self):
"""Iterator over all Residues in the Chain.
Returns
-------
residueiter : listiterator
            Iterator over all Residues in the Chain.
"""
return iter(self._residues)
def residue(self, index):
"""Get a specific residue in this Chain.
Parameters
----------
index : int
The index of the residue to select.
Returns
-------
residue : Residue
"""
return self._residues[index]
@property
def n_residues(self):
"""Get the number of residues in this Chain. """
return len(self._residues)
@property
def atoms(self):
"""Iterator over all Atoms in the Chain.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Chain.
"""
for residue in self._residues:
for atom in residue._atoms:
yield atom
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Chain with a specified name.
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in chain.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
def atom(self, index):
"""Get a specific atom in this Chain.
Parameters
----------
index : int
The index of the atom to select.
Returns
-------
atom : Atom
"""
# this could be made faster by caching the list
# of atoms internally if necessary
return next(itertools.islice(self.atoms, index, index + 1))
@property
def n_atoms(self):
"""Get the number of atoms in this Chain"""
return sum(r.n_atoms for r in self._residues)
class Residue(object):
"""A Residue object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Residue
index : int
The index of the Residue within its Topology
chain : mdtraj.topology.Chain
The chain within which this residue belongs
resSeq : int
The residue sequence number
segment_id : str, optional
A label for the segment to which this residue belongs
"""
def __init__(self, name, index, chain, resSeq, segment_id=''):
"""Construct a new Residue. You should call add_residue()
on the Topology instead of calling this directly."""
self.name = name
self.index = index
self.chain = chain
self.resSeq = resSeq
self.segment_id = segment_id
self._atoms = []
@property
def atoms(self):
"""Iterator over all Atoms in the Residue.
Returns
-------
atomiter : listiterator
Iterator over all Atoms in the Residue.
"""
return iter(self._atoms)
def atoms_by_name(self, name):
"""Iterator over all Atoms in the Residue with a specified name
Parameters
----------
name : str
The particular atom name of interest.
Examples
--------
>>> for atom in residue.atoms_by_name('CA'):
... print(atom)
Returns
-------
atomiter : generator
"""
for atom in self.atoms:
if atom.name == name:
yield atom
def atom(self, index_or_name):
"""Get a specific atom in this Residue.
Parameters
----------
index_or_name : {int, str}
Either a (zero-based) index, or the name of the atom. If a string
is passed in, the first atom -- in index order -- with a matching
            name will be returned.
Returns
-------
atom : Atom
"""
try:
return self._atoms[index_or_name]
except TypeError:
try:
return next(self.atoms_by_name(index_or_name))
except StopIteration:
raise KeyError('no matching atom found')
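    # Usage sketch (illustrative): the two lookup styles accepted above.
    #
    #   res.atom(0)      # first atom by position within the residue
    #   res.atom('CA')   # first atom named 'CA'; raises KeyError if absent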
@property
def n_atoms(self):
"""Get the number of atoms in this Residue"""
return len(self._atoms)
@property
def is_protein(self):
"""Whether the residue is one found in proteins."""
return self.name in _PROTEIN_RESIDUES
@property
def code(self):
"""Get the one letter code for this Residue"""
if self.is_protein:
return _AMINO_ACID_CODES[self.name]
else:
return None
@property
def is_water(self):
"""Whether the residue is water.
Residue names according to VMD
References
----------
http://www.ks.uiuc.edu/Research/vmd/vmd-1.3/ug/node133.html
"""
return self.name in _WATER_RESIDUES
@property
def is_nucleic(self):
"""Whether the residue is one found in nucleic acids."""
raise NotImplementedError
def __str__(self):
return '%s%s' % (self.name, self.resSeq)
def __repr__(self):
return str(self)
class Atom(object):
"""An Atom object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Atom
element : mdtraj.element.Element
        The element of the Atom
index : int
The index of the Atom within its Topology
residue : mdtraj.topology.Residue
The Residue this Atom belongs to
serial : int
The serial number from the PDB specification. Unlike index,
this may not be contiguous or 0-indexed.
"""
def __init__(self, name, element, index, residue, serial=None):
"""Construct a new Atom. You should call add_atom() on the Topology instead of calling this directly."""
# The name of the Atom
self.name = name
# That Atom's element
self.element = element
# The index of the Atom within its Topology
self.index = index
# The Residue this Atom belongs to
self.residue = residue
# The not-necessarily-contiguous "serial" number from the PDB spec
self.serial = serial
@property
def n_bonds(self):
"""Number of bonds in which the atom participates."""
# TODO: this info could be cached.
return ilen(bond for bond in self.residue.chain.topology.bonds
if self in bond)
@property
def is_backbone(self):
"""Whether the atom is in the backbone of a protein residue"""
return (self.name in set(['C', 'CA', 'N', 'O'])
and self.residue.is_protein)
@property
def is_sidechain(self):
"""Whether the atom is in the sidechain of a protein residue"""
return (self.name not in set(['C', 'CA', 'N', 'O'])
and self.residue.is_protein)
@property
def segment_id(self):
"""User specified segment_id of the residue to which this atom belongs"""
return self.residue.segment_id
def __eq__(self, other):
""" Check whether two Atom objects are equal. """
if self.name != other.name:
return False
if self.index != other.index:
return False
if self.element.name != other.element.name:
return False
if self.residue.name != other.residue.name:
return False
if self.residue.index != other.residue.index:
return False
if self.residue.chain.index != other.residue.chain.index:
return False
return True
def __hash__(self):
"""A quick comparison. """
return self.index
def __str__(self):
return '%s-%s' % (self.residue, self.name)
def __repr__(self):
return str(self)
| lgpl-2.1 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/sankey.py | 8 | 40828 | #!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Changed assertions to ValueError to catch common calling errors (by
# Francesco Montesano, [email protected])
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey(object):
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
          *shoulder*        size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
if gap < 0:
raise ValueError(
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
if radius > gap:
raise ValueError(
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
if head_angle < 0:
raise ValueError(
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
if tolerance < 0:
raise ValueError(
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
# [6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
        A path is not simply reversible by path[::-1] since the code
specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
# path[1] = path[1][-1:0:-1]
# path[1][0] = first_action
# path[2] = path[2][::-1]
# return path
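        # Worked example (added for clarity): with first_action=LINETO,
        # reverting [(MOVETO, p0), (LINETO, p1), (CURVE4, p2)] walks the points
        # backwards while shifting each code onto the vertex it now leads into,
        # giving [(LINETO, p2), (CURVE4, p1), (LINETO, p0)]; the original
        # MOVETO is dropped because the reversed path continues from the
        # caller's current point.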
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
if len(orientations) != n:
raise ValueError(
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
if len(labels) != n:
raise ValueError(
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
if trunklength < 0:
raise ValueError(
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
verbose.report(
"The scaled sum of the outputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if prior is not None:
if prior < 0:
raise ValueError("The index of the prior diagram is negative.")
if min(connect) < 0:
raise ValueError(
"At least one of the connection indices is negative.")
if prior >= len(self.diagrams):
raise ValueError(
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
if connect[0] >= len(self.diagrams[prior].flows):
raise ValueError(
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
if connect[1] >= n:
raise ValueError(
"The connection index to this diagram is %d, but this diagram"
"has only %d flows.\n The index is zero-based."
% (connect[1], n))
if self.diagrams[prior].angles[connect[0]] is None:
raise ValueError(
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
if abs(flow_error) >= self.tolerance:
raise ValueError(
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
if orient != -1:
raise ValueError(
"The value of orientations[%d] is %d, "
"but it must be [ -1 | 0 | 1 ]." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
if len(pathlengths) != n:
raise ValueError(
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_affine
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
        # Text objects are placed even if they are empty (as long as the magnitude
# of the corresponding flow is larger than the tolerance) in case the
# user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
# Include both vertices _and_ label locations in the extents; there are
        # cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
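    # Usage sketch (illustrative, not part of the original source): a single
    # subdiagram whose flows sum to zero, drawn and finalized in one chain.
    #
    #   Sankey(unit='kW').add(flows=[1.0, -0.3, -0.7],
    #                         orientations=[0, -1, 0],
    #                         labels=['input', 'losses', 'output'],
    #                         patchlabel='Plant').finish()
    #   # followed by matplotlib.pyplot.show() to display the figure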
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
                            :class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
                            If the magnitude of a flow is less than the
                            *tolerance* for the instance of :class:`Sankey`,
                            the flow is skipped and its tip will be at the
                            center of the diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have scikit-learn installed.
This script runs two benchmarks.
First, we increase the number of samples used to fit and classify, and
plot the time taken as a function of the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
USGSDenverPychron/pychron | pychron/processing/utils/flux_visualizer.py | 1 | 13319 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traits.etsconfig.etsconfig import ETSConfig
ETSConfig.toolkit = 'qt4'
# ============= enthought library imports =======================
import csv
from pylab import show, meshgrid, zeros, \
contourf, array, linspace, scatter, cm, xlabel, ylabel, \
colorbar, rc, gcf
import math
# from itertools import groupby
import matplotlib.pyplot as plt
from pychron.core.regression.ols_regressor import MultipleLinearRegressor
from pychron.core.regression.flux_regressor import PlaneFluxRegressor
# ============= standard library imports ========================
# ============= local library imports ==========================
def load_holder(holder):
holes = []
with open(holder, 'r') as rfile:
reader = csv.reader(rfile, delimiter=',')
reader.next() # pop header
for i, line in enumerate(reader):
try:
x, y = map(float, line)
v = math.atan2(y, x)
# print i + 1, v, x, y, math.degrees(v)
holes.append((x, y, v))
except Exception:
pass
return holes
def pt_factory(line, holes):
'''
    return x, y, theta, j for each line
    theta is calculated from the corresponding x, y
    j is the flux value
'''
try:
hole, j, sample = line
# if sample == 'FC-2':
x, y, theta = holes[int(hole) - 1]
# print hole, theta
return x, y, theta, float(j)
except Exception, e:
print 'exception', e
def get_column_idx(names, header):
if not isinstance(names, (list, tuple)):
names = (names,)
for attr in names:
for ai in (attr, attr.lower(), attr.upper(), attr.capitalize()):
if ai in header:
return header.index(ai)
def load_flux_xls(p, holes, header_offset=1):
import xlrd
wb = xlrd.open_workbook(p)
sheet = wb.sheet_by_index(0)
header = sheet.row_values(0)
hole_idx = get_column_idx('hole', header)
j_idx = get_column_idx('j', header)
j_err_idx = get_column_idx(('j_error', 'j err'), header)
data = []
hole_ids = []
for ri in range(sheet.nrows - header_offset):
ri += header_offset
# if ri % 2 == 0:
# continue
hole = sheet.cell_value(ri, hole_idx)
if hole:
hole = int(hole) - 1
j = sheet.cell_value(ri, j_idx)
je = sheet.cell_value(ri, j_err_idx)
j, je = float(j), float(je)
x, y, v = holes[hole]
hole_ids.append(hole)
data.append((x, y, j, je))
data = array(data)
return data.T, hole_ids
def flux_contour2d(xx, yy, z, ze, holes, hole_ids, fit_dev=False, age_space=0):
x, y = zip(*[(x, y) for i, (x, y, v) in enumerate(holes)
if not i in hole_ids])
n = z.shape[0]
r = max(xx)
XX, YY = make_grid(r, n)
m = model_flux(n, xx, yy, XX, YY, z)
if fit_dev:
k = model_flux(n, xx[:22], yy[:22], XX, YY, z[:22], klass=MultipleLinearRegressor)
zi = m - k
else:
zi = m
if age_space:
zi *= age_space
r = max(xx)
xi = linspace(-r, r, n)
yi = linspace(-r, r, n)
contourf(xi, yi, zi, 50, cmap=cm.jet)
cb = colorbar()
label = 'Delta-Age @28.02 (ka)' if age_space else 'J %'
cb.set_label(label)
scatter(x, y, marker='o',
c='black',
s=200,
alpha=0.1
)
scatter(xx, yy, marker='o',
cmap=cm.jet,
# lw=0,
# facecolor='None',
c='black',
# c=z,
s=200,
alpha=0.5
# s=300
)
# draw_border(1)
# draw_circular_grid(r, rings)
f = gcf()
f.set_size_inches((8, 8))
plt.axes().set_aspect('equal')
xlabel('X (mm)')
ylabel('Y (mm)')
rc('font', **{'size': 24})
def model_flux(n, xx, yy, XX, YY, z, klass=PlaneFluxRegressor):
nz = zeros((n, n))
xy = zip(xx, yy)
reg = klass(xs=xy, ys=z)
for i in xrange(n):
for j in xrange(n):
pt = (XX[i, j],
YY[i, j])
v = reg.predict([pt])[0]
nz[i, j] = v
return nz
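# Usage sketch (illustrative): evaluating the fitted flux surface on a square
# grid, mirroring how flux_contour2d() and flux_contour3d() call this helper.
#
#   XX, YY = make_grid(max(xx), len(z))
#   ZZ = model_flux(len(z), xx, yy, XX, YY, z)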
def flux_contour3d(xx, yy, z, ze):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(xx, yy , z, c=z,
s=100,
lw=0,
cmap=cm.jet
)
# plot error bars
for xj, yj, zj, ej in zip(xx, yy, z, ze):
ax.plot([xj, xj], [yj, yj], [zj - ej, zj + ej],
c='black')
# ax.plot([xj, xj], [yj, yj], [zj, zj - ej])
# print X.shape
# contourf(xi, yi, zi, 25, cmap=cm.jet)
# ax.contourf(XX, YY, zi,
# 20,
# cmap=cm.jet)
n = z.shape[0]
r = max(xx)
XX, YY = make_grid(r, n)
ZZ = model_flux(n, xx, yy, XX, YY, z)
p = ax.plot_surface(XX, YY, ZZ, cmap=cm.jet,
rstride=2, cstride=2,
linewidth=0, antialiased=False,
alpha=0.5
)
ZZ = model_flux(n, xx[:22], yy[:22], XX, YY, z[:22],
klass=MultipleLinearRegressor)
p = ax.plot_surface(XX, YY, ZZ, cmap=cm.jet,
rstride=2, cstride=2,
linewidth=0, antialiased=False,
alpha=0.5
)
cb = fig.colorbar(p)
cb.set_label('Delta-J %')
def visualize_flux_contour(p, holder, delim=','):
# from traits.etsconfig.etsconfig import ETSConfig
# ETSConfig.toolkit = 'qt4'
# from pychron.core.regression.ols_regressor import MultipleLinearRegressor
use_2d = True
#use_2d = False
fit_dev = False
# calc_dev = True
# calc_dev = False
# age_space = 280.2
# age_space = 0
holes = load_holder(holder)
(xx, yy, z, ze), hole_ids = load_flux_xls(p, holes)
ze = (ze / z) * 100
mz = min(z)
z = z - mz
z /= mz
z *= 100
# n = z.shape[0]
# r = max(xx)
# xi = linspace(-r, r, n)
# yi = linspace(-r, r, n)
# X = xi[None, :]
# Y = yi[:, None]
# zi = griddata((xx, yy), z, (X, Y),
# # method='linear',
# method='cubic',
# # fill_value=min(z)
# )
# XX, YY = meshgrid(xi, yi)
if use_2d:
flux_contour2d(xx, yy, z, ze, holes, hole_ids, fit_dev=fit_dev)
else:
flux_contour3d(xx, yy, z, ze,)
show()
def interpolate_flux(pholes, p, holder, delim=','):
holes = load_holder(holder)
(xx, yy, z, ze), hole_ids = load_flux_xls(p, holes)
xy = zip(xx, yy)
reg = PlaneFluxRegressor(xs=xy, ys=z, yserr=ze, error_calc_type='SEM')
reg.calculate()
output = []
for hi in pholes:
x, y, t = holes[hi - 1]
j, je = 0, 0
pt = [(x, y)]
j = reg.predict(pt)[0]
je = reg.predict_error([pt])[0]
print hi, j, je
#output.append((j,je))
#n = z.shape[0]
#r = max(xx)
#XX, YY = make_grid(r, n)
#ZZ = model_flux(n, xx, yy, XX, YY, z)
def make_grid(r, n):
xi = linspace(-r, r, n)
yi = linspace(-r, r, n)
return meshgrid(xi, yi)
if __name__ == '__main__':
# p = '/Users/ross/Sandbox/flux_visualizer/J_data_for_nm-258_tray_G_radial.txt'
p = '/Users/ross/Sandbox/flux_visualizer/J_data_for_nm-258_tray_G2.txt'
p = '/Users/ross/Sandbox/flux_visualizer/J_data_for_nm-258_tray_G3.txt'
p = '/Users/ross/Sandbox/flux_visualizer/J_nm-258_tray_G.xls'
p = '/Users/ross/Sandbox/flux_visualizer/J_nm-258_tray_G2.xls'
p = '/Users/ross/Sandbox/flux_visualizer/J_NM-259A.xls'
# p = '/Users/ross/Sandbox/flux_visualizer/J_NM-259A2.xls'
# p = '/Users/ross/Sandbox/flux_visualizer/Tray_I_NM-261.xls'
# p = '/Users/ross/Sandbox/flux_visualizer/runid_contour.txt'
# p = '/Users/ross/Sandbox/flux_visualizer/J_data_for_nm-258_tray_G.txt'
holder = '/Users/ross/Sandbox/flux_visualizer/irradiation_tray_maps/1_75mm_3level'
# visualize_flux(p, holder)
# visualize_flux_contour(p, holder)
# visualize_flux_contour(p, holder, delim='\t')
holes = [45, 47, 49, 51, 53]
interpolate_flux(holes, p, holder, delim='\t')
# ============= EOF =============================================
# def load_flux_csv(p, holes, delim):
# with open(p, 'U') as fp:
# reader = csv.reader(fp, delimiter=delim)
# reader.next()
#
# mi = Inf
# ma = -Inf
# xy = []
# z = []
# ze = []
# # rs = []
# hole_ids = []
# for line in reader:
# if not line:
# continue
# hole = int(line[0])
# hole_ids.append(hole - 1)
# j = float(line[1])
# je = float(line[2])
# x, y, v = holes[hole - 1]
# xy.append((x, y))
# z.append(j)
# z.append(je)
# # rs.append(round(((x ** 2 + y ** 2) ** 0.5) * 100))
#
# mi = min(mi, j)
# ma = max(ma, j)
#
# xy = array(xy)
# xx, yy = xy.T
# z = array(z)
#
# return (xx, yy, z, ze), hole_ids
# def draw_border(d):
# r = d / 2.
# xs = linspace(-r, r, 200)
# y = (r ** 2 - xs ** 2) ** 0.5
# plot(xs, y, c='black')
# plot(xs, -y, c='black')
# def draw_circular_grid(r, rings):
# for ray in range(0, 360, 10):
#
# m = -math.radians(ray)
# x = r * math.cos(m)
# y = r * math.sin(m)
# plot([0, x], [0, y], ls='-.', color='black')
#
# for ring in rings:
# ring /= 100.
# x = linspace(-ring, ring, 100)
# y = (ring ** 2 - x ** 2) ** 0.5
#
# plot(x, -y, ls='-.', color='black')
# x = linspace(-ring, ring, 100)
# y = (ring ** 2 - x ** 2) ** 0.5
# plot(x, y, ls='-.', color='black')
# def visualize_flux(p, holder, use_polar=False):
# '''
# load the x,y points for irrad holder
# load the flux file
#
# schema:
# hole,j
#
# '''
# holes = load_holder(holder)
# # x, y, holes = zip(*holes)
#
# with open(p, 'U') as fp:
# reader = csv.reader(fp)
# # holes = []
# # js = []
# reader.next()
# groups = []
# group = []
# for line in reader:
# if not line:
# groups.append(group)
# group = []
# else:
# group.append(pt_factory(line, holes))
# groups.append(group)
# mi = Inf
# ma = -Inf
#
# # fig = Figure()
# if not use_polar:
# fig = plt.figure()
# ax = fig.add_subplot(111,
# projection='3d'
# )
# # ax = fig.gca(projection='3d')
# mi_rings = []
# ma_rings = []
# for args in groups:
# # print args
# # theta, r = zip(*args)
# if use_polar:
# x, y, theta, r = zip(*[ai for ai in args if ai])
#
# polar(theta, r)
# mi = min(min(r), mi)
# ma = max(max(r), ma)
#
# else:
# x, y, theta, j = zip(*[ai for ai in args if ai])
# x = array(x)
# y = array(y)
# r = (x ** 2 + y ** 2) ** 0.5
#
# m, b = polyfit(x, y, 1)
#
# # xs = linspace(0, 0.5)
# # mf, bf = polyfit(x, j, 1)
# # fj = polyval((mf, bf), xs)
# # ax.plot(xs, polyval((m, b), xs), fj,)
# ax.plot(x, y, j, color='blue'
# # label='{}'.format(int(math.degrees(mean(theta))))
# )
# # ax.plot(x, y)
# midx = argmin(j)
# madx = argmax(j)
#
# mi_rings.append((x[midx], y[midx], j[midx]))
# ma_rings.append((x[madx], y[madx], j[madx]))
#
# if not use_polar:
# x, y, j = zip(*ma_rings)
# ax.plot(x, y, j, color='black')
# x, y, j = zip(*mi_rings)
# ax.plot(x, y, j, color='black')
# else:
# ylim(mi * 0.95, ma)
# # ax.set_xlim(-1, 1)
# # ax.set_ylim(-1, 1)
# # print r
# # lines = [pt_factory(line, holes) for line in reader]
# # theta, r = zip(*[args for args in lines if args])
# legend()
# show()
| apache-2.0 |
CELMA-project/dissertation | fig/figCreators/laplaceBC.py | 1 | 3256 | #!/usr/bin/env python
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
"""
Makes a parametric plot of the boundary condition of the Laplace
inverter for different modes.
"""
def plotLaplaceBCOnCylinder(mode = 5 ,\
lineThroughPointNr = 45 ,\
showPlot = False,\
extension ="png" ):
"""
Plots the laplace inversion boundary condition on a cylinder
Parameters
----------
mode : int
Mode number
lineThroughPointNr : int
        Index of the point from which the red line is drawn to the diametrically opposite point
showPlot: bool
Whether or not to show the plot
extension : str
Extension of the plot to save
Return
------
fileName : str
The name of the saved file
"""
fig = plt.figure(figsize=(18,12))
ax = fig.gca(projection='3d')
ax.set_axis_off()
n = 128
# NOTE: The parametric curve has the coordinates (x(z), y(z), z)
# I. e. for each z, there is a corresponding x,y pair
zero = np.zeros(n)
# +1 as we are making this periodically
theta = np.linspace(0, 2*np.pi, n+1)
# Until last point (periodic point not counted twice)
theta = theta[:-1]
x = np.sin(theta)
y = np.cos(theta)
# Make the sine
sine = zero + np.sin(mode*theta)
# Make x spine (actually not a spine, just a line)
xSpine = np.linspace(-1.5, 1.5, n)
# Make y spine (actually not a spine, just a line)
ySpine = xSpine
ax.plot(xSpine, zero, zero, "k--", linewidth = 1.5)
ax.plot(zero, ySpine, zero, "k--", linewidth = 1.5)
ax.plot(x, y, zero, "-go", linewidth = 3.0)
ax.plot(x, y, sine, "-bo", linewidth = 3.0)
# Making vertical bars
for i in range(len(sine)):
pointX = (x[i], x[i] )
pointY = (y[i], y[i] )
pointZ = (0 , sine[i])
ax.plot(pointX, pointY, pointZ, "k", linewidth=1.0)
# Making the red line
diametricallyOpposite = lineThroughPointNr + int(n/2)
redX = (x[lineThroughPointNr] , x[diametricallyOpposite])
redY = (y[lineThroughPointNr] , y[diametricallyOpposite])
redZ = (sine[lineThroughPointNr], sine[diametricallyOpposite])
# NOTE: If we wanted the red line to be behind everything, we would
# have set zorder=0
ax.plot(redX, redY, redZ, "r", linewidth=2.0)
# Set view
ax.view_init(elev=55, azim=-52)
    # Zoom in a bit to avoid white space
ax.dist = 6.5
fig.tight_layout(pad=0.0)
fileName = "../mode_{}.{}".format(mode, extension)
fig.savefig(fileName, transparent=True)
if showPlot:
plt.show()
return fileName
if __name__ == "__main__":
from subprocess import Popen
# Plot the odd mode
fileName =\
plotLaplaceBCOnCylinder(mode=5, lineThroughPointNr=57, extension="pdf")
# Crop with pdfCrop
Popen("pdfcrop {0} {0}".format(fileName), shell=True).wait()
# Plot the even mode
fileName =\
plotLaplaceBCOnCylinder(mode=4, lineThroughPointNr=40, extension="pdf")
# Crop with pdfCrop
Popen("pdfcrop {0} {0}".format(fileName), shell=True).wait()
| gpl-3.0 |
airanmehr/cssvm | python/eval.py | 1 | 9388 | from numpy import array, arange, sum, append
from operator import itemgetter
from matplotlib import pyplot
def get_perror(deci, label, Pn, Pp):
neg, pos, neg_err, pos_err = 0., 0., 0., 0.
for (f,y) in zip(deci,label):
if y>0:
pos+=1
if f* y <= 0:
pos_err+=1
else:
neg+=1
if f* y <= 0:
neg_err+=1
print "Pn: {0} neg: {1} neg_err: {2}".format(Pn, neg, neg_err)
print "Pp: {0} pos: {1} pos_err: {2}".format(Pp, pos, pos_err)
return Pn*neg_err/neg + Pp*pos_err/pos
def get_error(deci, label):
err= 0.
for (f,y) in zip(deci,label):
if f* y <= 0:
err+=1
return err/len(label)
def get_acc(deci, label):
correct= 0.
for (f,y) in zip(deci,label):
if f* y > 0:
correct+=1
return 100*correct/len(label)
def get_income(deci, label, costs):
income= 0.
for (f,y,c) in zip(deci,label, costs):
if f > 0 and y > 0:
income += c
if f > 0 and y < 0:
income -= c
return income
def get_risk(deci, label, Pn, Pp, Cn=1, Cp=1, doPlot=False, isTesting=False, param=None):
"""
    Computes the risk for the given decision values, labels, costs and class priors and,
    optionally, plots the ROC curve with three operating points marked:
    1) No thresholding (the FPR and FNR associated with the raw decision values)
    2) Best thresholding (the operating point on the ROC curve with minimum risk for the given costs/priors)
    3) TH thresholding (in the test phase, param['cv_th'] -- typically found by CV -- is added to the
       decision values and the risk of the resulting operating point is computed and shown in the plot)
    In the CV/training phase (isTesting=False) the minimum risk and the threshold of the best operating
    point are returned, to be used in the test phase; in the test phase (isTesting=True) the risk at
    param['cv_th'] and the best threshold found on the test data are returned.
"""
db,pos,neg, n=sort_deci(deci,label)
tp, fp, min_risk = 0., 0., 1
x, y = array([0]), array([1]) # x,y are coordinates in ROC space
w = [Cn * Pn , Cp * Pp ]
w = [w[0]/sum(w), w[1]/sum(w)]# Normalizing costs (for comparing risks of different costs)
for i in range(1, n + 1):
if db[i - 1][1] > 0:
tp += 1.
else:
fp += 1.
fpr = fp / neg
fnr = (pos - tp) / pos
risk = w[0] * fpr + w[1] * fnr
if min_risk >= risk:
min_risk = risk
best_op = i #Best Operating Point index in the sorted list
x, y= append(x, fpr), append(y, fnr)
ROCPoint_NoTH = get_Risk_for_TH(deci, label, w, pos, neg, 0)
if isTesting:
ROCPoint_TH = get_Risk_for_TH(deci, label, w, pos, neg, param['cv_th'])
test_th=get_TH_for_Risk(db, best_op)
if param['verb']>2:
print "Best Threshold in: CV= {0} Test= {1}".format(param['cv_th'], test_th)
else:
cv_th=get_TH_for_Risk(db, best_op)
ROCPoint_TH = None
if doPlot:
plot_ROC_Risk(w, x, y, best_op, min_risk,get_figure_fname(param), get_figure_title(param), ROCPoint_TH, ROCPoint_NoTH)
if isTesting:
return ROCPoint_TH['risk'], test_th # in Test phase, returns the risk of the point with CV_threshold and the best threshold in all operating points
else:
return min_risk, cv_th # in Training phase, returns the risk of the point with minimum risk and its threshold
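# A minimal usage sketch of get_risk (hypothetical toy data). The keys 'cv_th' and 'verb'
# are the ones read here; 'dataset_name' and 'alg' are only needed when doPlot=True:
#     deci = [1.2, 0.8, -0.3, -1.1]
#     label = [1, 1, -1, -1]
#     min_risk, cv_th = get_risk(deci, label, Pn=0.5, Pp=0.5)  # CV/training phase
#     risk, test_th = get_risk(deci, label, 0.5, 0.5, isTesting=True,
#                              param={'cv_th': cv_th, 'verb': 0,
#                                     'dataset_name': 'demo', 'alg': 'CS-SVM'})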
def sort_deci(deci,label):
db = []
pos, neg, n = 0., 0., len(label)
for i in range(n):
if label[i] > 0:
pos += 1
else:
neg += 1
db.append([deci[i], label[i]])
db = sorted(db, key=itemgetter(0), reverse=True) # Sort in descenting order
return db, pos, neg, n
def get_TH_for_Risk(db, best_op):
return -(db[best_op][0]+db[best_op+1][0])/2
def get_Risk_for_TH(deci, label, w, pos,neg, th):
ROCPoint={'fpr': 0., 'fnr': 0.}
for (f,Y) in zip(deci,label):
if (f+th)*Y <=0:
if Y<=0:
ROCPoint['fpr'] +=1
else:
ROCPoint['fnr'] +=1
ROCPoint['fpr']/=neg
ROCPoint['fnr']/=pos
ROCPoint['risk']= w[0] * ROCPoint['fpr'] + w[1] * ROCPoint['fnr']
return ROCPoint
def get_figure_fname(param):
# print param
return '/home/arya/{0}.{1}.png'.format(param['dataset_name'],param['alg'])
def get_figure_title(param):
return '{0} on {1}'.format(param['alg'], param['dataset_name'])
def get_auc(deci, label, param):
"""
    Computes AUC for the region where param['hit'] > param['t']; by default it computes the AUC for TP > 0.9
"""
db, xy_arr = [], []
pos, neg, tp, fp, auc, err = 0, 0, 0, 0, 0., 0.
n = len(label)
for i in range(n):
if label[i] > 0:
if deci[i]<0:
err+=1
pos += 1
else:
if deci[i]>0:
err+=1
neg += 1
db.append([deci[i], label[i]])
db = sorted(db, key=itemgetter(0), reverse=True)
    for i in range(1, n + 1):
if db[i - 1][1] > 0:
tp += 1.
else:
fp += 1.
fpr = fp / neg
fn = pos - tp
fnr = fn / pos
xy_arr.append([fpr, fnr])
xy_arr.append([1,0])
if param['hit']=='TN': #TN
prev_x = 0
for x,y in xy_arr:
if x > param['t'] :
break
if x != prev_x:
auc += (x - prev_x) * y
prev_x = x
auc+=(param['t']-prev_x)*y
else: #TP
prev_y = 0
for x,y in reversed( xy_arr):
if y> param['t']:
break
if y != prev_y:
auc += (y - prev_y) * x
prev_y = y
auc+=(param['t']-prev_y)*x
auc= auc/(1-param['t'])
if param['do_plot']:
plot_ROC_tAUC(x, y)
return auc
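# NOTE: the following definition of get_auc redefines (shadows) the version above;
# only this second implementation is bound to the name get_auc at import time.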
def get_auc(deci, label, param):
if param['measure']=='TP':
Fix_Detection=True
elif param['measure']=='TN':
Fix_Detection=False
else:
        print 'The measure should be either TP or TN.'
exit()
t=1-param['t']
db = []
auc = 0.
pos, neg, tp, fp= 0, 0, 0, 0
n = len(label)
err=0.
for i in range(n):
if label[i] > 0:
if deci[i]<0:
err+=1
pos += 1
else:
if deci[i]>0:
err+=1
neg += 1
db.append([deci[i], label[i]])
db = sorted(db, key=itemgetter(0), reverse=True)
xy_arr = []
    for i in range(1, n + 1):
if db[i - 1][1] > 0:
tp += 1.
else:
fp += 1.
fpr = fp / neg
fn = pos - tp
fnr = fn / pos
xy_arr.append([fpr, fnr])
xy_arr.append([1,0])
if not Fix_Detection: #TN
prev_x = 0
for x,y in xy_arr:
if x > t :
break
if x != prev_x:
auc += (x - prev_x) * y
prev_x = x
auc+=(t-prev_x)*y
else: #TP
prev_y = 0
for x,y in reversed( xy_arr):
if y> t:
break
if y != prev_y:
auc += (y - prev_y) * x
prev_y = y
auc+=(t-prev_y)*x
auc= auc/t
# print "AUC:{3} Error: {0} ({1} out of {2})".format(err/n,err,n,auc)
return auc
def plot_ROC_Risk(w,x,y,best_operating_point,risk,fname,title, TH=None, NoTH=None):
from matplotlib import rc
from numpy import ones
import pylab
rc('font', family='serif', size=20)
fig = pylab.figure(1, figsize=(8, 8), dpi=100, facecolor='w')
ax = fig.add_subplot(111)
fig.hold(True)
m = w[0] / w[1]
line_x = arange(0, 1, 0.001)
line_y = ones(1000) - m * line_x
pylab.fill_between(x , ones(len(y)), y, facecolor='#D0D0D0')
pylab.plot(x, y, 'r', linewidth=3)
best=[x[best_operating_point], y[best_operating_point]]
pylab.plot(best[0], best[1], '*k', markersize=20, label='Risk$^*$ ={0:.3f}'.format(risk))
pylab.plot(line_x, line_y, 'k', linewidth=3)
midline=len(line_x)/2
arrow_x, arrow_y=line_x[midline], line_y[midline]
pylab.arrow(arrow_x, arrow_y, -w[0] * 0.15 , -w[1] * 0.15, head_length=.02, head_width=.02, linewidth=2, color='black')
pylab.axis([-0.005, 1.001, -0.005, 1.001])
pylab.xlabel('$FPR$')
pylab.ylabel('$FNR$')
pylab.title(title)
if TH!=None:
pylab.plot(TH['fpr'], TH['fnr'], 'ob', markersize=12, label='TH Risk={0:.3f}'.format(TH['risk']))
if NoTH!=None:
pylab.plot(NoTH['fpr'], NoTH['fnr'], 'sc', markersize=12, label='BM Risk={0:.3f}'.format(NoTH['risk']))
pyplot.legend(numpoints=1, prop={'size':16})
pylab.savefig(fname)
pylab.grid()
ax.set_aspect('equal')
pylab.show()
pylab.close()
def plot_ROC_tAUC(x,y):
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif', size=24)
import pylab
fig = pylab.figure(1, figsize=(8, 7), dpi=100, facecolor='w')
fig.hold(True)
from numpy import ones
pylab.fill_between(x , ones(len(y)), y, facecolor='#D0D0D0')
pylab.plot(x, y, 'r', linewidth=3)
    # pylab.plot(-1,-1,'ob',markersize=10,label='Neyman-Pearson')
pylab.axis([-0.005, 1.001, -0.005, 1.001])
pylab.xlabel('$P_{FP}$', fontsize=30)
pylab.ylabel('$P_{FN}$', fontsize=30)
pylab.grid()
pylab.show()
pylab.close() | mit |
jor-/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/gaussian_process/kernels.py | 18 | 66251 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
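# A minimal usage sketch (illustrative only, not part of the library's API docs):
# kernels are composed with "+", "*" and "**", and scalars are promoted to a
# ConstantKernel automatically.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import RBF, WhiteKernel
#     X = np.random.RandomState(0).rand(5, 2)    # toy data: 5 samples, 2 features
#     k = 2.0 * RBF(length_scale=1.5) + WhiteKernel(noise_level=0.1)
#     K = k(X)                                   # (5, 5) kernel matrix
#     K, K_grad = k(X, eval_gradient=True)       # gradient w.r.t. log-hyperparameters
#     print(k.theta, k.bounds)                   # log-transformed hyperparameters and bounds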
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, "fixed" is
        derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
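# Editor's hedged usage sketch (not part of the original source): evaluating the
# RationalQuadratic kernel above; the toy values are illustrative only.
def _demo_rational_quadratic_kernel():
    X_demo = np.array([[0.0], [0.5], [2.0]])
    kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
    K, K_gradient = kernel(X_demo, eval_gradient=True)
    # The gradient stacks d K / d log(alpha) and d K / d log(length_scale),
    # giving shape (3, 3, 2).
    return K, K_gradient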
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
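# Editor's hedged usage sketch (not part of the original source): the periodic
# ExpSineSquared kernel applied to a 1-d toy input; data values are assumptions.
def _demo_exp_sine_squared_kernel():
    X_demo = np.linspace(0, 2, 5)[:, np.newaxis]
    kernel = ExpSineSquared(length_scale=1.0, periodicity=1.0)
    K, K_gradient = kernel(X_demo, eval_gradient=True)
    # Points exactly one period apart have a kernel value of (numerically) 1;
    # K_gradient has shape (5, 5, 2) for (length_scale, periodicity).
    return K, K_gradient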
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, ..., D)
and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel is invariant
to a rotation of the coordinates about the origin, but not to translations. It
is parameterized by a parameter sigma_0^2. For sigma_0^2 = 0, the kernel is
called the homogeneous linear kernel; otherwise it is inhomogeneous. The kernel
is given by
k(x_i, x_j) = sigma_0^2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
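# Editor's hedged usage sketch (not part of the original source): the
# non-stationary DotProduct kernel; note that diag(K) equals ||x_i||^2 +
# sigma_0^2 rather than 1, since this kernel is not normalized.
def _demo_dot_product_kernel():
    X_demo = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
    kernel = DotProduct(sigma_0=1.0)
    K = kernel(X_demo)            # np.inner(X, X) + sigma_0 ** 2
    K_diag = kernel.diag(X_demo)  # array([2., 5., 3.])
    return K, K_diag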
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
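# Editor's hedged sketch (not part of the original source): using _approx_fprime
# above to cross-check an analytic kernel gradient numerically, in the same way
# the scikit-learn gradient tests exercise this helper; data are illustrative.
def _demo_gradient_check():
    X_demo = np.array([[0.0], [0.4], [1.3]])
    kernel = ExpSineSquared(length_scale=1.2, periodicity=1.0)
    K, K_gradient = kernel(X_demo, eval_gradient=True)
    K_gradient_num = _approx_fprime(
        kernel.theta,
        lambda theta: kernel.clone_with_theta(theta)(X_demo),
        1e-10)
    # Both gradients are taken with respect to the log-transformed
    # hyperparameters, so the maximum absolute difference should be tiny.
    return np.max(np.abs(K_gradient - K_gradient_num))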
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the kernel (similarity) between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
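# Editor's hedged usage sketch (not part of the original source): wrapping an
# RBF pairwise kernel; the gradient with respect to gamma is obtained
# numerically via _approx_fprime. Toy values are assumptions.
def _demo_pairwise_kernel():
    X_demo = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
    kernel = PairwiseKernel(gamma=0.5, metric="rbf")
    K, K_gradient = kernel(X_demo, eval_gradient=True)
    # K has shape (3, 3); K_gradient has shape (3, 3, 1) since gamma is the
    # only (log-scaled) hyperparameter.
    return K, K_gradient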
| bsd-3-clause |
iismd17/scikit-learn | sklearn/neighbors/approximate.py | 71 | 22357 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
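# Editor's hedged sketch (not part of the original source): hashing a few points
# into 32-bit LSH fingerprints with the class above; the random toy data and the
# helper name `_demo_gaussian_hash` are illustrative assumptions.
def _demo_gaussian_hash():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(4, 64)
    hasher = GaussianRandomProjectionHash(n_components=MAX_HASH_SIZE,
                                          random_state=0)
    hashes = hasher.fit_transform(X_demo)
    # One big-endian uint32 hash per sample: shape (4, 1), dtype '>u4'.
    return hashes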
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of the input data points by taking the
dot product of the input points with each hash function and then
transforming the projection into a binary array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data points to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
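# Editor's hedged usage sketch (not part of the original scikit-learn file):
# building an index, extending it with partial_fit and querying it; the random
# toy data and the helper name `_demo_lsh_forest` are illustrative assumptions.
def _demo_lsh_forest():
    rng = np.random.RandomState(42)
    X_index = rng.rand(60, 10)
    X_extra = rng.rand(5, 10)
    lshf = LSHForest(random_state=42).fit(X_index)
    lshf.partial_fit(X_extra)                      # index now holds 65 samples
    distances, indices = lshf.kneighbors(X_extra[:2], n_neighbors=3)
    return distances.shape, indices.shape          # (2, 3), (2, 3)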
| bsd-3-clause |
alfeinsod/xastropy | xastropy/xguis/utils.py | 3 | 2167 | """
#;+
#; NAME:
#; utils
#; Version 1.0
#;
#; PURPOSE:
#; Module for Simple Guis with QT
#; 27-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import glob
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from xastropy.xutils import xdebug as xdb
# ##################################
# GUI for velocity plot
class WarningWidg(QtGui.QDialog):
''' GUI to warn the user about an impending action and solicit a response
24-Dec-2014 by JXP
'''
def __init__(self, message, parent=None):
'''
message = str
Message to display
'''
super(WarningWidg, self).__init__(parent)
# Initialize
# Grab the pieces and tie together
z_label = QtGui.QLabel('Warning: {:s}'.format(message))
# Quit
nbtn = QtGui.QPushButton('No', self)
nbtn.clicked.connect(self.touch_no)
ybtn = QtGui.QPushButton('Yes', self)
ybtn.clicked.connect(self.touch_yes)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(z_label)
vbox.addWidget(nbtn)
vbox.addWidget(ybtn)
self.setLayout(vbox)
def touch_yes(self):
self.ans = True
self.done(0)
def touch_no(self):
self.ans = False
self.done(0)
# ################
# TESTING
if __name__ == "__main__":
flg_fig = 0
flg_fig += 2**0 # Warning
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('Warning')
main = WarningWidg('Will remove all lines. \n Continue??')
main.show()
app.exec_()
if main.ans:
print('You answered yes!')
else:
print('You answered no!')
| bsd-3-clause |
timnon/pyschedule | src/pyschedule/plotters/matplotlib.py | 1 | 5819 | from __future__ import absolute_import as _absolute_import
import operator
#! /usr/bin/env python
'''
Copyright 2015 Tim Nonner
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
def plot(scenario,img_filename=None,resource_height=1.0,show_task_labels=True,
color_prec_groups=False,hide_tasks=[],hide_resources=[],task_colors=dict(),fig_size=(15,5),
vertical_text=False) :
"""
Plot the given solved scenario using matplotlib
Args:
scenario: solved scenario to plot
img_filename: if given, write the plot to this file instead of showing it
resource_height: vertical height of one resource band in the plot
show_task_labels: if True, draw task names inside the plotted bars
color_prec_groups: if True, tasks linked by precedence constraints share a color
hide_tasks: tasks to exclude from the plot
hide_resources: resources to exclude from the plot
task_colors: dict mapping tasks to fixed plot colors
fig_size: matplotlib figure size
vertical_text: if True, rotate task labels by 90 degrees
"""
try :
import matplotlib
if img_filename is not None:
matplotlib.use('Agg')
import matplotlib.patches as patches, matplotlib.pyplot as plt
except :
raise Exception('ERROR: matplotlib is not installed')
import random
S = scenario
# trivial connected components implementation to avoid
# having to import other packages just for that
def get_connected_components(edges) :
comps = dict()
for v,u in edges :
if v not in comps and u not in comps :
comps[v] = v
comps[u] = v
elif v in comps and u not in comps :
comps[u] = comps[v]
elif v not in comps and u in comps :
comps[v] = comps[u]
elif v in comps and u in comps and comps[v] != comps[u] :
old_comp = comps[u]
for w in comps :
if comps[w] == old_comp :
comps[w] = comps[v]
# replace component identifiers by integers startting with 0
values = list(comps.values())
comps = { T : values.index(comps[T]) for T in comps }
return comps
tasks = [ T for T in S.tasks() if T not in hide_tasks ]
# get connected components dict for coloring
# each task is mapping to an integer number which corresponds
# to its connected component
edges = [ (T,T) for T in tasks ]
if color_prec_groups :
edges += [ (T,T_) for P in set(S.precs_lax()) | set(S.precs_tight()) \
for T in P.tasks() for T_ in P.tasks() \
if T in tasks and T_ in tasks ]
comps = get_connected_components(edges)
# color map
colors = ['#7EA7D8','#A1D372','#EB4845','#7BCDC8','#FFF79A'] #pastel colors
#colors = ['red','green','blue','yellow','orange','black','purple'] #basic colors
colors += [ [ random.random() for i in range(3) ] for x in range(len(S.tasks())) ] #random colors
color_map = { T : colors[comps[T]] for T in comps }
# replace colors with fixed task colors
for T in task_colors :
color_map[T] = task_colors[T]
hide_tasks_str = [ T for T in hide_tasks ]
for T in scenario.tasks():
if hasattr(T,'plot_color'):
if T['plot_color'] is not None:
color_map[T] = T['plot_color']
else:
hide_tasks_str.append(T)
solution = S.solution()
solution = [ (T,R,x,y) for (T,R,x,y) in solution if T not in hide_tasks_str ] # hidden tasks are not plotted
fig, ax = plt.subplots(1, 1, figsize=fig_size)
resource_sizes_count = 0
visible_resources = [ R for R in S.resources() if R not in hide_resources ]
if not visible_resources:
raise Exception('ERROR: no resources to plot')
total_resource_sizes = sum([ R.size for R in visible_resources ])
R_ticks = list()
for R in visible_resources :
if R.size is not None :
resource_size = R.size
else :
resource_size = 1.0
R_ticks += [str(R.name)]*int(resource_size)
# compute the levels of the tasks on one resource
task_levels = dict()
# get solution on resource sorted according to start time
R_solution = [ (T,R_,x,y) for (T,R_,x,y) in solution if R_ == R ]
R_solution = sorted(R_solution, key=lambda x : x[2])
# iteratively fill all levels on resource, start with empty fill
level_fill = { i : 0 for i in range(int(resource_size)) }
for T,R_,x,y in R_solution :
sorted_levels = sorted(level_fill.items(), key = operator.itemgetter(1, 0))
# get the maximum resource requirement
coeff = max([ RA[R] for RA in T.resources_req if R_ in RA ])
min_levels = [ level for level,fill in sorted_levels[:coeff] ]
task_levels[T] = min_levels
for level in min_levels :
level_fill[level] += T.length
# plot solution
for (T,R,x,x_) in R_solution :
for level in task_levels[T] :
y = (total_resource_sizes-1-(resource_sizes_count+level))*resource_height
ax.add_patch(
patches.Rectangle(
(x, y), # (x,y)
max(x_-x,0.5), # width
resource_height, # height
color = color_map[T],
alpha=0.6
)
)
if show_task_labels :
if vertical_text:
text_rotation = 90
y_ = y+0.9*resource_height
else:
text_rotation = 0
y_ = y+0.1*resource_height
plt.text(x,y_,str(T.name),fontsize=14,color='black',rotation=text_rotation)
resource_sizes_count += resource_size
# format graph
plt.title(str(S.name))
plt.yticks([ resource_height*x + resource_height/2.0 for x in range(len(R_ticks)) ],R_ticks[::-1])
plt.ylim(0,resource_sizes_count*resource_height)#resource_height*len(resources))
plt.xlim(0,max([ x_ for (I,R,x,x_) in solution if R in visible_resources ]))
if img_filename is not None:
fig.figsize=(1,1)
plt.savefig(img_filename,dpi=200,bbox_inches='tight')
else :
plt.show()
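# Editor's hedged usage sketch (not part of the original module). The scenario
# construction below assumes the usual pyschedule API (Scenario, Task, Resource
# and solvers.mip.solve); treat the exact names and arguments as illustrative
# assumptions rather than a definitive recipe.
def _demo_plot(img_filename=None):
    from pyschedule import Scenario, solvers
    S = Scenario('demo', horizon=10)
    R = S.Resource('machine')
    T1 = S.Task('T1', length=2)
    T2 = S.Task('T2', length=3)
    T1 += R
    T2 += R
    S += T1 < T2  # T1 must finish before T2 starts
    solvers.mip.solve(S)
    plot(S, img_filename=img_filename)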
| apache-2.0 |
mistercrunch/panoramix | superset/examples/random_time_series.py | 3 | 2925 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from sqlalchemy import DateTime, String
from superset import db
from superset.models.slice import Slice
from superset.utils import core as utils
from .helpers import config, get_example_data, get_slice_json, merge_slice, TBL
def load_random_time_series_data(
only_metadata: bool = False, force: bool = False
) -> None:
"""Loading random time series data from a zip file in the repo"""
tbl_name = "random_time_series"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("random_time_series.json.gz")
pdf = pd.read_json(data)
if database.backend == "presto":
pdf.ds = pd.to_datetime(pdf.ds, unit="s")
pdf.ds = pdf.ds.dt.strftime("%Y-%m-%d %H:%M:%S")
else:
pdf.ds = pd.to_datetime(pdf.ds, unit="s")
pdf.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={"ds": DateTime if database.backend != "presto" else String(255)},
index=False,
)
print("Done loading table!")
print("-" * 80)
print(f"Creating table [{tbl_name}] reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "ds"
obj.database = database
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "day",
"row_limit": config["ROW_LIMIT"],
"since": "2019-01-01",
"until": "2019-02-01",
"metric": "count",
"viz_type": "cal_heatmap",
"domain_granularity": "month",
"subdomain_granularity": "day",
}
print("Creating a slice")
slc = Slice(
slice_name="Calendar Heatmap",
viz_type="cal_heatmap",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
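# Editor's hedged sketch (not part of the original Superset example): a tiny
# in-memory reproduction of the timestamp handling in the loader above, without
# touching a database. The epoch values and helper name are illustrative; it
# mirrors the Presto branch, which stores formatted strings instead of datetimes.
def _demo_ds_conversion() -> list:
    demo = pd.DataFrame({"ds": [1546300800, 1546387200]})  # seconds since epoch
    demo.ds = pd.to_datetime(demo.ds, unit="s")
    return demo.ds.dt.strftime("%Y-%m-%d %H:%M:%S").tolist()
    # ['2019-01-01 00:00:00', '2019-01-02 00:00:00']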
| apache-2.0 |