| repo_name (string, len 6-92) | path (string, len 4-191) | copies (322 classes) | size (string, len 4-6) | content (string, len 821-753k) | license (15 classes) |
---|---|---|---|---|---|
ExaScience/smurff | data/movielens/preprocess_movielens.py | 1 | 3946 | #!/usr/bin/python
from pprint import pprint
from numpy.core.fromnumeric import nonzero
import pandas as pd
import numpy as np
import scipy.sparse as sp
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
import smurff
movies = pd.read_csv("merged.csv")
movies["movieIdx"] = movies.index # used in sparse matrix
ratings = pd.read_csv("ratings.csv")
ratings["userIdx"] = ratings["userId"] - 1
ratings = pd.merge(ratings, movies, on = "movieId", how = "inner")
ratings_matrix = sp.coo_matrix((ratings['rating'].values,
(ratings['movieIdx'].values, ratings['userIdx'].values))).tocsr()
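# The COO triplets are (rating, (movie row, user column)), so ratings_matrix
# ends up with one row per movie and one column per user; tocsr() makes the
# per-movie row slicing used further down cheap.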
movies["genres_x"] = movies["genres_x"].str.replace("|", ",", regex=False)
movies["timestamp"] = pd.to_datetime(movies["date"]).fillna(pd.Timestamp(0.)).astype('int64') // 10**9
lang_ids = { l : i for i,l in enumerate(movies["language"].unique()) }
movies["language_id"] = movies["language"].map(lang_ids)
numerical_columns = [
"timestamp",
"runtime",
"revenue",
"budget",
"popularity",
"average_vote",
"num_votes",
"language_id",
]
category_columns = [
"genres_x",
"genres_y",
#"keywords",
#"director",
#"cast",
#"production_companies",
"production_countries",
]
movies[numerical_columns] = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(movies[numerical_columns])
movies[numerical_columns] = StandardScaler().fit_transform(movies[numerical_columns])
movies.to_csv("movies_normalized.csv")
### --- category
movies[category_columns] = movies[category_columns].fillna("")
for col in category_columns:
movies[col] = movies[col].str.split(", ?")
features_vectorizer = DictVectorizer()
categories_dict = movies[category_columns].to_dict(orient = 'records')
categories_matrix = features_vectorizer.fit_transform(categories_dict)
for col,name in enumerate(features_vectorizer.get_feature_names()):
data = categories_matrix[:,col].data
num_nan = np.isnan(data).sum()
if num_nan > 0:
print(f"{name}: {num_nan} / {len(data)} - {data}")
features_matrix = sp.hstack((categories_matrix, sp.csc_matrix(movies[numerical_columns])))
import pickle
pickle.dump(features_vectorizer, open("movies_features_vectorizer.pickle", "wb"))
pickle.dump(features_matrix, open("movie_features_matrix.pickle", "wb"))
pickle.dump(ratings_matrix, open("ratings_matrix.pickle", "wb"))
smurff.matrix_io.write_matrix("ratings.sdm", ratings_matrix)
smurff.matrix_io.write_matrix("features.sdm", features_matrix)
#--- only save 1000 most popular movies and 1000 users
ratings_matrix = ratings_matrix.tocsr()
features_matrix = features_matrix.tocsr()
nnz_per_movie = [ ratings_matrix[r,:].nnz for r in range(ratings_matrix.shape[0]) ]
popular_movies = np.argsort(nnz_per_movie)
ratings_matrix_1k = ratings_matrix[popular_movies[-1000:], :]
features_matrix_1k = features_matrix[popular_movies[-1000:], :]
ratings_matrix_1k = ratings_matrix_1k.tocsc()
nnz_per_user = [ ratings_matrix_1k[:,r].nnz for r in range(ratings_matrix_1k.shape[1]) ]
popular_users = np.argsort(nnz_per_user)
ratings_matrix_1k = ratings_matrix_1k[:, popular_users[-1000:]]
smurff.matrix_io.write_matrix("ratings_1k_popular.sdm", ratings_matrix_1k)
smurff.matrix_io.write_matrix("features_1k_popular.sdm", features_matrix_1k)
#--- save 1000 random movies and 1000 random users
random_movies = np.random.permutation(ratings_matrix.shape[0])
ratings_matrix_1k = ratings_matrix[random_movies[-1000:], :]
features_matrix_1k = features_matrix[random_movies[-1000:], :]
ratings_matrix_1k = ratings_matrix_1k.tocsc()
random_users = np.random.permutation(ratings_matrix.shape[1])
ratings_matrix_1k = ratings_matrix_1k[:, random_users[-1000:]]
smurff.matrix_io.write_matrix("ratings_1k_random.sdm", ratings_matrix_1k)
smurff.matrix_io.write_matrix("features_1k_random.sdm", features_matrix_1k) | mit |
Og192/Python | sklearnLearning/statisticalAndSupervisedLearning/decisiontree.py | 2 | 1526 | print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "ryb"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
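# Classify every grid point: ravel() flattens xx and yy, np.c_ pairs them
# into (n_points, 2) samples, and reshape() puts the predicted labels back
# on the grid so contourf can draw the decision regions.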
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.RdYlBu, edgecolor='black', s=15)
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")
plt.show() | gpl-2.0 |
simon-r/dr14_t.meter | dr14tmeter/dr_histogram.py | 1 | 3462 | # dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import math
from dr14tmeter.audio_math import *
try:
import matplotlib.pyplot as pyplot
import matplotlib.mlab as mlab
except ImportError:
pass
def compute_hist(Y, Fs, duration=None, bins=100, block_duration=0.2, plot=True, title=None):
s = Y.shape
if len(Y.shape) > 1:
ch = s[1]
else:
ch = 1
samples_per_block = int(Fs * block_duration)
seg_cnt = int(math.floor(float(s[0]) / samples_per_block)) + 1
if seg_cnt < 3:
return False
curr_sam = 0
rms = numpy.zeros((seg_cnt, ch))
#peaks = numpy.zeros((seg_cnt,ch))
for i in range(seg_cnt - 1):
#r = numpy.arange( curr_sam , curr_sam + saples_per_block )
rms[i, :] = u_rms(Y[curr_sam: curr_sam + samples_per_block, :])
curr_sam = curr_sam + samples_per_block
i = seg_cnt - 1
r = numpy.arange(curr_sam, s[0])
if r.shape[0] > 0:
rms[i, :] = dr_rms(Y[r, :])
rms = numpy.sum(rms, 1) / float(ch)
rms[rms == 0.0] = audio_min16()
rms = decibel_u(rms, 1.0)
rms_mean = numpy.mean(rms)
rms_std = numpy.std(rms)
rms_max = numpy.max(rms)
abs_peak = decibel_u(numpy.max(numpy.abs(Y)), 1.0)
if plot:
# matplotlib removed the 'normed' kwarg; 'density' is the direct replacement
(hist, bin_edges, patches) = pyplot.hist(rms, 100, density=True)
mean_x = numpy.array([rms_mean, rms_mean])
mean_y = numpy.array([0.0, numpy.max(hist) * 1.01])
std_x = numpy.array([rms_mean - rms_std, rms_mean + rms_std])
std_y = numpy.array([numpy.max(hist) * 0.7, numpy.max(hist) * 0.7])
pyplot.plot(mean_x, mean_y, linewidth=2, color='g')
pyplot.plot(std_x, std_y, linewidth=2, ls='--', color='c')
#print( hist )
pyplot.axis([-92, 0, 0, numpy.max(hist) * 1.05])
text_rel_pos = 0.9
pyplot.text(-85, numpy.max(hist) * text_rel_pos,
"mean: %.3f dB" % rms_mean, fontsize=15,)
pyplot.text(-85, numpy.max(hist) * (text_rel_pos - 0.05),
"std dev: %.3f dB" % rms_std, fontsize=15,)
pyplot.text(-85, numpy.max(hist) * (text_rel_pos - 0.10),
"peak: %.3f dB" % abs_peak, fontsize=15,)
pyplot.text(-85, numpy.max(hist) * (text_rel_pos - 0.15),
"max rms: %.3f dB" % rms_max, fontsize=15,)
pyplot.xlabel('RMS dB')
pyplot.ylabel('Relative frequency')
if title is not None:
hist_title = title
else:
hist_title = "Histogram of dynamics"
pyplot.title(r'%s' % hist_title)
pyplot.plot(mean_x, mean_y)
pyplot.grid(True)
pyplot.show()
else:
(hist, bin_edges) = numpy.histogram(rms, bins=bins, density=True)
return (hist, bin_edges)
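# Hedged usage sketch (not part of the original module); the signal below is
# illustrative only:
# import numpy
# Y = 0.1 * numpy.random.randn(5 * 44100, 2)  # 5 s of stereo noise, 44.1 kHz
# hist, edges = compute_hist(Y, 44100, plot=False)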
| gpl-3.0 |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/vincent/data.py | 3 | 16914 | # -*- coding: utf-8 -*-
"""
Data: Vincent Data Class for data importing and Vega Data type
"""
from __future__ import (print_function, division)
import time
import json
from .core import (
_assert_is_type,
ValidationError,
grammar,
GrammarClass,
LoadError
)
from ._compat import str_types
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
class Data(GrammarClass):
"""Data container for visualization
The Vega document may contain the data itself or a reference to a URL
containing the data and formatting instructions. Additionally, new data
can be created from old data via the transform fields.
"""
_default_index_key = 'idx'
def __init__(self, name=None, **kwargs):
"""Initialize a Data object
Parameters
----------
name : string, default None
Name of the data set. If None (default), then the name will be
set to ``'table'``.
**kwargs : dict
Attributes to set on initialization.
"""
super(self.__class__, self).__init__(**kwargs)
self.name = name if name else 'table'
@grammar(str_types)
def name(value):
"""string : Name of the data
This is used by other components (``Mark``, etc.) for reference.
"""
@grammar(str_types)
def url(value):
"""string : URL from which to load the data
This can be used as an alternative to defining the data in the
``values`` attribute.
"""
@grammar(list)
def values(value):
"""list : Data contents
Data is represented in tabular form, where each element of
``values`` corresponds to a row of data. Each row of data is
represented by a dict or a raw number. The keys of the dict are
columns and the values are individual data points. The keys of the
dicts must be strings for the data to correctly serialize to JSON.
The data will often have an "index" column representing the
independent variable, with the remaining columns representing the
dependent variables, though this is not required. The ``Data`` class
itself, however, is agnostic to which columns are dependent and
independent.
For example, the values attribute
``[{'x': 0, 'y': 3.2}, {'x': 1, 'y': 1.3}]``
could represent two rows of two variables - possibly an independent
variable ``'x'`` and a dependent variable ``'y'``.
For simple data sets, an alternative values attribute could be a
simple list of numbers such as
``[2, 12, 3, 5]``.
It may be more convenient to load data from pandas or NumPy objects.
See the methods :func:`Data.from_pandas` and
:func:`Data.from_numpy`.
"""
for row in value:
_assert_is_type('values row', row, (float, int, dict))
@grammar(str_types)
def source(value):
"""string : ``name`` field of another data set
This is typically used with data transforms to create new data
values.
"""
@grammar(list)
def transform(value):
"""list : transforms to apply to the data
Note: Transform-relational classes are not yet implemented.
"""
@grammar(dict)
def format(value):
"""dict : information about the data format
This is only used when loading data from the ``url`` attribute.
Format-relational classes are not yet implemented.
"""
def validate(self, *args):
"""Validate contents of class
"""
super(self.__class__, self).validate(*args)
if not self.name:
raise ValidationError('name is required for Data')
@staticmethod
def serialize(obj):
"""Convert an object into a JSON-serializable value
This is used by the ``from_pandas`` and ``from_numpy`` functions to
convert data to JSON-serializable types when loading.
"""
if isinstance(obj, str_types):
return obj
elif hasattr(obj, 'timetuple'):
return int(time.mktime(obj.timetuple())) * 1000
elif hasattr(obj, 'item'):
return obj.item()
elif hasattr(obj, '__float__'):
if isinstance(obj, int):
return int(obj)
else:
return float(obj)
elif hasattr(obj, '__int__'):
return int(obj)
else:
raise LoadError('cannot serialize index of type '
+ type(obj).__name__)
@classmethod
def from_pandas(cls, data, columns=None, key_on='idx', name=None,
series_key='data', grouped=False, records=False, **kwargs):
"""Load values from a pandas ``Series`` or ``DataFrame`` object
Parameters
----------
data : pandas ``Series`` or ``DataFrame``
Pandas object to import data from.
columns: list, default None
DataFrame columns to convert to Data. Keys default to col names.
If columns are given and key_on is left as 'idx', x-axis data will
default to the first column.
key_on: string, default 'idx'
Value to key on for x-axis data. Defaults to index.
name : string, default None
Applies to the ``name`` attribute of the generated class. If
``None`` (default), then the ``name`` attribute of ``pd_obj`` is
used if it exists, or ``'table'`` if it doesn't.
series_key : string, default 'data'
Applies only to ``Series``. If ``None`` (default), then defaults to
data.name. For example, if ``series_key`` is ``'x'``, then the
entries of the ``values`` list
will be ``{'idx': ..., 'col': 'x', 'val': ...}``.
grouped: boolean, default False
Pass true for an extra grouping parameter
records: boolean, default False
Requires Pandas 0.12 or greater. Writes the Pandas DataFrame
using the df.to_json(orient='records') formatting.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor.
"""
# Note: There's an experimental JSON encoder floating around in
# pandas land that hasn't made it into the main branch. This
# function should be revisited if it ever does.
if not pd:
raise LoadError('pandas could not be imported')
if not hasattr(data, 'index'):
raise ValueError('Please load a Pandas object.')
if name:
vega_data = cls(name=name, **kwargs)
else:
vega_data = cls(name='table', **kwargs)
pd_obj = data.copy()
if columns:
pd_obj = data[columns]
if key_on != 'idx':
pd_obj.index = data[key_on]
if records:
#The worst
vega_data.values = json.loads(pd_obj.to_json(orient='records'))
return vega_data
vega_data.values = []
if isinstance(pd_obj, pd.Series):
data_key = data.name or series_key
for i, v in pd_obj.iteritems():
value = {}
value['idx'] = cls.serialize(i)
value['col'] = data_key
value['val'] = cls.serialize(v)
vega_data.values.append(value)
elif isinstance(pd_obj, pd.DataFrame):
# We have to explicitly convert the column names to strings
# because the json serializer doesn't allow for integer keys.
for i, row in pd_obj.iterrows():
for num, (k, v) in enumerate(row.iteritems()):
value = {}
value['idx'] = cls.serialize(i)
value['col'] = cls.serialize(k)
value['val'] = cls.serialize(v)
if grouped:
value['group'] = num
vega_data.values.append(value)
else:
raise ValueError('cannot load from data type '
+ type(pd_obj).__name__)
return vega_data
@classmethod
def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
**kwargs):
"""Load values from a numpy array
Parameters
----------
np_obj : numpy.ndarray
numpy array to load data from
name : string
``name`` field for the data
columns : iterable
Sequence of column names, from left to right. Must have same
length as the number of columns of ``np_obj``.
index : iterable, default None
Sequence of indices from top to bottom. If ``None`` (default),
then the indices are integers starting at 0. Must have same
length as the number of rows of ``np_obj``.
index_key : string, default None
Key to use for the index. If ``None`` (default), ``idx`` is
used.
**kwargs : dict
Additional arguments passed to the :class:`Data` constructor
Notes
-----
The individual elements of ``np_obj``, ``columns``, and ``index``
must return valid values from :func:`Data.serialize`.
"""
if not np:
raise LoadError('numpy could not be imported')
_assert_is_type('numpy object', np_obj, np.ndarray)
# Integer index if none is provided
index = index or range(np_obj.shape[0])
# Explicitly map dict-keys to strings for JSON serializer.
columns = list(map(str, columns))
index_key = index_key or cls._default_index_key
if len(index) != np_obj.shape[0]:
raise LoadError(
'length of index must be equal to number of rows of array')
elif len(columns) != np_obj.shape[1]:
raise LoadError(
'length of columns must be equal to number of columns of '
'array')
data = cls(name=name, **kwargs)
data.values = [
dict([(index_key, cls.serialize(idx))] +
[(col, x) for col, x in zip(columns, row)])
for idx, row in zip(index, np_obj.tolist())]
return data
@classmethod
def from_mult_iters(cls, name=None, idx=None, **kwargs):
"""Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
would result in ``d`` having a ``values`` field with
[{'idx': 0, 'col': 'y', 'val': 10},
{'idx': 1, 'col': 'y', 'val': 20}
If the iterables are not the same length, then ValueError is
raised.
"""
if not name:
name = 'table'
lengths = [len(v) for v in kwargs.values()]
if len(set(lengths)) != 1:
raise ValueError('Iterables must all be same length')
if not idx:
raise ValueError('Must provide iter name index reference')
index = kwargs.pop(idx)
vega_vals = []
for k, v in sorted(kwargs.items()):
for idx, val in zip(index, v):
value = {}
value['idx'] = idx
value['col'] = k
value['val'] = val
vega_vals.append(value)
return cls(name, values=vega_vals)
@classmethod
def from_iter(cls, data, name=None):
"""Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
"""
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}
for k, v in sorted(data.items())]
return cls(name, values=values)
@classmethod
def keypairs(cls, data, columns=None, use_index=False, name=None):
"""This will format the data as Key: Value pairs, rather than the
idx/col/val style. This is useful for some transforms, and to
key choropleth map data
Standard Data Types:
List: [0, 10, 20, 30, 40]
Paired Tuples: ((0, 1), (0, 2), (0, 3))
Dict: {'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50}
Plus Pandas DataFrame and Series, and Numpy ndarray
Parameters
----------
data:
List, Tuple, Dict, Pandas Series/DataFrame, Numpy ndarray
columns: list, default None
If passing Pandas DataFrame, you must pass at least one column
name.If one column is passed, x-values will default to the index
values.If two column names are passed, x-values are columns[0],
y-values columns[1].
use_index: boolean, default False
Use the DataFrame index for your x-values
"""
if not name:
name = 'table'
cls.raw_data = data
#Tuples
if isinstance(data, tuple):
values = [{"x": x[0], "y": x[1]} for x in data]
#Lists
elif isinstance(data, list):
values = [{"x": x, "y": y}
for x, y in zip(range(len(data) + 1), data)]
#Dicts
elif isinstance(data, dict) or isinstance(data, pd.Series):
values = [{"x": x, "y": y} for x, y in sorted(data.items())]
#Dataframes
elif isinstance(data, pd.DataFrame):
if len(columns) > 1 and use_index:
raise ValueError('If using index as x-axis, len(columns)'
'cannot be > 1')
if use_index or len(columns) == 1:
values = [{"x": cls.serialize(x[0]),
"y": cls.serialize(x[1][columns[0]])}
for x in data.iterrows()]
else:
values = [{"x": cls.serialize(x[1][columns[0]]),
"y": cls.serialize(x[1][columns[1]])}
for x in data.iterrows()]
#NumPy arrays
elif isinstance(data, np.ndarray):
values = cls._numpy_to_values(data)
else:
raise TypeError('unknown data type %s' % type(data))
return cls(name, values=values)
@staticmethod
def _numpy_to_values(data):
'''Convert a NumPy array to values attribute'''
def to_list_no_index(xvals, yvals):
return [{"x": x, "y": np.asscalar(y)}
for x, y in zip(xvals, yvals)]
if len(data.shape) == 1 or data.shape[1] == 1:
xvals = range(data.shape[0] + 1)
values = to_list_no_index(xvals, data)
elif len(data.shape) == 2:
if data.shape[1] == 2:
# NumPy arrays and matrices have different iteration rules.
if isinstance(data, np.matrix):
xidx = (0, 0)
yidx = (0, 1)
else:
xidx = 0
yidx = 1
xvals = [np.asscalar(row[xidx]) for row in data]
yvals = [np.asscalar(row[yidx]) for row in data]
values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
else:
raise ValueError('arrays with > 2 columns not supported')
else:
raise ValueError('invalid dimensions for ndarray')
return values
def to_json(self, validate=False, pretty_print=True, data_path=None):
"""Convert data to JSON
Parameters
----------
data_path : string
If not None, then data is written to a separate file at the
specified path. Note that the ``url`` attribute if the data must
be set independently for the data to load correctly.
Returns
-------
string
Valid Vega JSON.
"""
#TODO: support writing to separate file
return super(self.__class__, self).to_json(validate=validate,
pretty_print=pretty_print)
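# Hedged usage sketch (not part of the original module): Data.from_iter turns
# a plain list into idx/col/val records, e.g.
# d = Data.from_iter([10, 20, 30], name='demo')
# d.values[0]  # -> {'idx': 0, 'col': 'data', 'val': 10}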
| gpl-2.0 |
BiaDarkia/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
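# positive=True is the same 32-bit hash reinterpreted as unsigned:
# -1823081949 + 2**32 == 2471885347, matching the expected values above.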
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/axes_grid/demo_axes_rgb.py | 7 | 1836 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes, RGBAxes
def get_demo_image():
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
def get_rgb():
Z, extent = get_demo_image()
Z[Z<0] = 0.
Z = Z/Z.max()
R = Z[:13,:13]
G = Z[2:,2:]
B = Z[:13,2:]
return R, G, B
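# R, G and B are three offset 13x13 crops of the same 15x15 field, so the
# composed image shows slightly shifted channels.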
def make_cube(r, g, b):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
return R, G, B, RGB
def demo_rgb():
fig, ax = plt.subplots()
ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02)
#fig.add_axes(ax_r)
#fig.add_axes(ax_g)
#fig.add_axes(ax_b)
r, g, b = get_rgb()
im_r, im_g, im_b, im_rgb = make_cube(r, g, b)
kwargs = dict(origin="lower", interpolation="nearest")
ax.imshow(im_rgb, **kwargs)
ax_r.imshow(im_r, **kwargs)
ax_g.imshow(im_g, **kwargs)
ax_b.imshow(im_b, **kwargs)
def demo_rgb2():
fig = plt.figure(2)
ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
#fig.add_axes(ax)
#ax.add_RGB_to_figure()
r, g, b = get_rgb()
kwargs = dict(origin="lower", interpolation="nearest")
ax.imshow_rgb(r, g, b, **kwargs)
ax.RGB.set_xlim(0., 9.5)
ax.RGB.set_ylim(0.9, 10.6)
for ax1 in [ax.RGB, ax.R, ax.G, ax.B]:
for sp1 in ax1.spines.values():
sp1.set_color("w")
for tick in ax1.xaxis.get_major_ticks() + ax1.yaxis.get_major_ticks():
tick.tick1line.set_mec("w")
tick.tick2line.set_mec("w")
return ax
demo_rgb()
ax = demo_rgb2()
plt.show()
| apache-2.0 |
kleskjr/scipy | scipy/interpolate/_cubic.py | 8 | 29300 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
DOI:10.1137/0717021
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
DOI: http://dx.doi.org/10.1137/1.9780898717952
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self._bpoly)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives at the points y_k, d_k, by using
# PCHIP algorithm is:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
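# A minimal usage sketch (not part of the original module); data values are
# made up for illustration:
# >>> xi = [0, 1, 2, 3]; yi = [0, 0, 1, 1]  # monotone, step-like data
# >>> p = PchipInterpolator(xi, yi)
# >>> 0.0 <= p(1.5) <= 1.0  # PCHIP does not overshoot the data
# True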
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points for purposes of plotting.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at a breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivatives at curve ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivatives at curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` allowing to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and violates only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
| bsd-3-clause |
wildux/CDI | 01_P_entropia.py | 1 | 1692 | # -*- coding: utf-8 -*-
"""
Joan Rodas Cusidó
"""
import math
import numpy as np
import matplotlib.pyplot as plt
'''
Given a list p, decide whether it is a probability distribution (ddp):
0 <= p[i] <= 1, sum(p[i]) = 1.
'''
def es_ddp(p,tolerancia=10**(-5)):
suma = 0
for element in p:
if element < 0:
return False
suma += element
if (suma >= 1 - tolerancia) and (suma <= 1 + tolerancia):
return True
return False
'''
Given a code C and a ddp p, compute the average codeword length.
'''
def LongitudMedia(C,p):
l = 0
i = 0
for element in C:
l += ( len(element)*p[i] )
i += 1
return l
'''
Given a ddp p, compute its entropy.
'''
def H1(p):
e = 0
for element in p:
if element > 10**(-5):
e += element * math.log(element, 2)
return -e
'''
Given a list of frequencies n, compute its entropy.
'''
def H2(n):
suma = 0
e = 0
for element in n:
suma += element
for element in n:
if element > 10**(-5):
element = element/suma
e += element * math.log(element, 2)
return -e
'''
Examples
'''
C=['001','101','11','0001','000000001','0001','0000000000']
p=[0.5,0.1,0.1,0.1,0.1,0.1,0]
n=[5,2,1,1,1]
print('Is it a ddp? ',es_ddp(p))
print('H1: ',H1(p))
print('H2: ',H2(n))
print('Avg length: ',LongitudMedia(C,p))
'''
Plot H(p, 1-p)
'''
def draw():
x = []
y = []
for p in np.arange(0, 1.01, 0.01):
x.append(p)
y.append(H1([p,1-p]))
plt.plot(x, y)
plt.show()
'''
Find, approximately, the maximum of H(p, q, 1-p-q)
'''
def find_max():
maxim = 0
for p in np.arange(0, 1.01, 0.01):
for q in np.arange(0, 1.01, 0.01):
if p + q <= 1 and H1([p,q,1-p-q]) > maxim:
maxim = H1([p,q,1-p-q])
return maxim
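# For three outcomes the entropy is maximised by the uniform distribution
# p = q = 1/3, giving H = log2(3) ~ 1.585 bits, which find_max() approximates.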
draw()
print('Maximum: ',find_max())
| gpl-3.0 |
ricardog/raster-project | attic/show.py | 1 | 3273 | #!/usr/bin/env python
from copy import copy
import rasterio
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
from rasterio.plot import show
import re
import pdb
import projections.pd_utils as pd_utils
from projections.lu.luh2 import LU
shape = (567, 1440)
bounds = (-180, -58, 180, 83.75)
palette = copy(plt.cm.viridis)
#palette.set_over('g', 1.0)
palette.set_under('r', 1.0)
palette.set_bad('k', 1.0)
palette2 = copy(plt.cm.viridis)
palette2.set_over('b', 1.0)
palette2.set_under('r', 1.0)
palette2.set_bad('k', 1.0)
def rcs(height, res, left, bottom, right, top):
er = 6378137.0
lats = np.linspace(top, bottom + res[1], height)
vec = ((np.sin(np.radians(lats + res[1] / 2.0)) -
np.sin(np.radians(lats - res[1] / 2.0))) *
(res[0] * np.pi/180) * er ** 2 / 1e6)
return vec.reshape((vec.shape[0], 1))
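# rcs gives the area in km^2 of each grid cell as a column vector: a cell
# between latitudes phi1 and phi2 spanning d_lon degrees of longitude has
# spherical area R^2 * radians(d_lon) * (sin(phi2) - sin(phi1)); dividing by
# 1e6 converts m^2 to km^2.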
def check_hpd(df):
scale = rcs(shape[0], (0.25, 0.25), *bounds)
hpd = ma.masked_invalid(df['hpd'].values.reshape(shape))
total = (hpd * scale).sum()
#pdb.set_trace()
print("hpd: %10.2e" % total)
def check(lu, df):
if lu == 'timber':
return
if lu + '_minimal' in df.columns:
minimal = ma.masked_invalid(df[lu + '_minimal'].values.reshape(shape))
else:
minimal = 0
#if lu + '_light' in df.columns:
light = ma.masked_invalid(df[lu + '_light'].values.reshape(shape))
#else:
# light = 0
#if lu + '_intense' in df.columns:
intense = ma.masked_invalid(df[lu + '_intense'].values.reshape(shape))
#else:
# intense = 0
data = ma.masked_invalid(df[lu].values.reshape(shape))
total = (minimal + light + intense)
print('checking: %s [%6.4f | %8.3f]' % (lu, total.max(), (data - total).sum()))
assert np.all(data - total > -0.01)
if (data - total).sum() > 2:
#pdb.set_trace()
pass
#assert total.max() > 0.9
assert np.isclose(total.min(), 0)
pass
def check_sum(lus, df):
total = ma.masked_invalid(df[lus[0]].values.reshape(shape))
for lu in lus[1:]:
total += ma.masked_invalid(df[lu].values.reshape(shape))
print("%6.4f" % total.max())
print(list(map(lambda x: "%s, %6.4f" % (x,
df[x].values.reshape(shape)[444, 1208]),
LU.keys())))
pdb.set_trace()
#assert np.allclose(total, 1, equal_nan=True)
pass
def area(lu, df):
pass
def doit():
df1950 = pd_utils.load_pandas('/Volumes/Vagrant 155/playground/1950.pyd')
df2009 = pd_utils.load_pandas('/Volumes/Vagrant 155/playground/2009.pyd')
assert np.all(df1950.columns == df2009.columns)
check_hpd(df1950)
check_hpd(df2009)
check_sum(LU.keys(), df1950)
check_sum(LU.keys(), df2009)
for lu in LU.keys():
check(lu, df1950)
check(lu, df2009)
for col in df1950.columns:
t1 = ma.masked_invalid(df1950[col].values.reshape(shape))
t2 = ma.masked_invalid(df2009[col].values.reshape(shape))
if re.search(r'_ref$', col):
assert np.allclose(t1, t2, equal_nan=True)
else:
r = t2 / t1
r.mask = np.logical_or(r.mask,
np.logical_and(np.where(t1 == 0, True, False),
np.where(t2 == 0, True, False)))
mm = min(r.max(), 15)
show(r, title=str(col), vmin=0.99, vmax=1.01, cmap=palette)
doit()
| apache-2.0 |
mhue/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
vinodkc/spark | python/pyspark/sql/pandas/serializers.py | 23 | 12308 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serializers for PyArrow and pandas conversions. See `pyspark.serializers` for more details.
"""
from pyspark.serializers import Serializer, read_int, write_int, UTF8Deserializer
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class ArrowCollectSerializer(Serializer):
"""
Deserialize a stream of batches followed by batch order information. Used in
PandasConversionMixin._collect_as_arrow() after invoking Dataset.collectAsArrowToPython()
in the JVM.
"""
def __init__(self):
self.serializer = ArrowStreamSerializer()
def dump_stream(self, iterator, stream):
return self.serializer.dump_stream(iterator, stream)
def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch
# load the batch order indices or propagate any error that occurred in the JVM
num = read_int(stream)
if num == -1:
error_msg = UTF8Deserializer().loads(stream)
raise RuntimeError("An error occurred while calling "
"ArrowCollectSerializer.load_stream: {}".format(error_msg))
batch_order = []
for i in range(num):
index = read_int(stream)
batch_order.append(index)
yield batch_order
def __repr__(self):
return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
class ArrowStreamPandasSerializer(ArrowStreamSerializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
Parameters
----------
timezone : str
A timezone to respect when handling timestamp values
safecheck : bool
If True, conversion from Arrow to Pandas checks for overflow/truncation
assign_cols_by_name : bool
If True, then Pandas DataFrames will get columns by name
"""
def __init__(self, timezone, safecheck, assign_cols_by_name):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
self._safecheck = safecheck
self._assign_cols_by_name = assign_cols_by_name
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
_convert_map_items_to_dict
import pyarrow
# If the given column is a date type column, creates a series of datetime.date directly
# instead of creating datetime64[ns] as intermediate data to avoid overflow caused by
# datetime64[ns] type handling.
s = arrow_column.to_pandas(date_as_object=True)
if pyarrow.types.is_timestamp(arrow_column.type):
return _check_series_localize_timestamps(s, self._timezone)
elif pyarrow.types.is_map(arrow_column.type):
return _convert_map_items_to_dict(s)
else:
return s
def _create_batch(self, series):
"""
Create an Arrow record batch from the given pandas.Series or list of Series,
with optional type.
Parameters
----------
series : pandas.Series or list
A single series, list of series, or list of (series, arrow_type)
Returns
-------
pyarrow.RecordBatch
Arrow RecordBatch
"""
import pandas as pd
import pyarrow as pa
from pyspark.sql.pandas.types import _check_series_convert_timestamps_internal, \
_convert_dict_to_map_items
from pandas.api.types import is_categorical_dtype
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
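        # e.g. a bare Series s becomes [(s, None)]; a mixed list such as
        # [s1, (s2, t2)] becomes [(s1, None), (s2, t2)]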
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s, self._timezone)
elif t is not None and pa.types.is_map(t):
s = _convert_dict_to_map_items(s)
elif is_categorical_dtype(s.dtype):
# Note: This can be removed once minimum pyarrow version is >= 0.16.1
s = s.astype(s.dtypes.categories.dtype)
try:
array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
except ValueError as e:
if self._safecheck:
error_msg = "Exception thrown when converting pandas.Series (%s) to " + \
"Arrow Array (%s). It can be caused by overflows or other " + \
"unsafe conversions warned by Arrow. Arrow safe type check " + \
"can be disabled by using SQL config " + \
"`spark.sql.execution.pandas.convertToArrowArraySafely`."
raise ValueError(error_msg % (s.dtype, t)) from e
else:
raise e
return array
arrs = []
for s, t in series:
if t is not None and pa.types.is_struct(t):
if not isinstance(s, pd.DataFrame):
raise ValueError("A field of type StructType expects a pandas.DataFrame, "
"but got: %s" % str(type(s)))
# Input partition and result pandas.DataFrame empty, make empty Arrays with struct
if len(s) == 0 and len(s.columns) == 0:
arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
# Assign result columns by schema name if user labeled with strings
elif self._assign_cols_by_name and any(isinstance(name, str)
for name in s.columns):
arrs_names = [(create_array(s[field.name], field.type), field.name)
for field in t]
# Assign result columns by position
else:
arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
for i, field in enumerate(t)]
struct_arrs, struct_names = zip(*arrs_names)
arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
else:
arrs.append(create_array(s, t))
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in range(len(arrs))])
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class ArrowStreamPandasUDFSerializer(ArrowStreamPandasSerializer):
"""
Serializer used by Python worker to evaluate Pandas UDFs
"""
def __init__(self, timezone, safecheck, assign_cols_by_name, df_for_struct=False):
super(ArrowStreamPandasUDFSerializer, self) \
.__init__(timezone, safecheck, assign_cols_by_name)
self._df_for_struct = df_for_struct
def arrow_to_pandas(self, arrow_column):
import pyarrow.types as types
if self._df_for_struct and types.is_struct(arrow_column.type):
import pandas as pd
series = [super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(column)
.rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
s = pd.concat(series, axis=1)
else:
s = super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(arrow_column)
return s
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
def __repr__(self):
return "ArrowStreamPandasUDFSerializer"
class CogroupUDFSerializer(ArrowStreamPandasUDFSerializer):
def load_stream(self, stream):
"""
Deserialize Cogrouped ArrowRecordBatches to a tuple of Arrow tables and yield as two
lists of pandas.Series.
"""
import pyarrow as pa
dataframes_in_group = None
while dataframes_in_group is None or dataframes_in_group > 0:
dataframes_in_group = read_int(stream)
if dataframes_in_group == 2:
batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
yield (
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()]
)
elif dataframes_in_group != 0:
raise ValueError(
'Invalid number of pandas.DataFrames in group {0}'.format(dataframes_in_group))
| apache-2.0 |
nrhine1/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
ndevenish/simplehistogram | simplehist/hists.py | 1 | 6669 | # coding: utf-8
"""
hists.py
Copyright (c) 2014 Nicholas Devenish <[email protected]>
An easy, quick, lightweight histogram class based on ndarray
Initialise with bin indices:
>>> a = Hist([0, 1, 2, 3])
>>> len(a)
3
>>> a.bins
array([0, 1, 2, 3])
Optionally include data:
>>> Hist([0, 1, 2, 3], data=[1, 0.2, 3])
Hist([0, 1, 2, 3], data=[ 1. , 0.2, 3. ])
Or just specify the blank data type:
>>> a = Hist([0, 1, 2, 3], dtype=int)
>>> a
Hist([0, 1, 2, 3], data=[0, 0, 0])
You can do any normal numpy arithmetic operations:
>>> a = Hist([0, 1, 2, 3], data=[1, 0.2, 3])
>>> b = a + a
>>> b -= a
>>> all(a == b)
True
And you can fill bins from values:
>>> a = Hist([0,1,2,3])
>>> a.fill(1.4, 3)
>>> a
Hist([0, 1, 2, 3], data=[ 0., 3., 0.])
Or from arrays:
>>> a = Hist([0,1,2,3])
>>> a.fill([1.4, 2.4], weights=[1, 2])
>>> a
Hist([0, 1, 2, 3], data=[ 0., 1., 2.])
If you use pyROOT, you can convert from 1D histograms:
>>> type(source)
<class 'ROOT.TH1D'>
>>> convert = ashist(source)
>>> type(convert)
<class 'simplehist.hists.Hist'>
Or conversion from custom types - see simplehist.converter for
implementation details.
You can also draw histograms, using any of the options
that can be passed to matplotlib.pyplot.plot:
>>> hist_object.draw_hist(lw=2)
"""
import sys
import numpy
# A numpy array with bins, and constraints on those bins
class Hist(numpy.ndarray):
def __new__(cls, bins, data=None, **kwargs):
# If bins contains items that are list-like then it is probably multidim
if isinstance(bins[0], (tuple, list)):
# It must be multi-dimension...
bins = tuple(numpy.asarray(x) for x in bins)
ndims = len(bins)
shape = tuple(len(x)-1 for x in bins)
else:
# Just a single dimension
bins = numpy.asarray(bins)
assert bins.ndim == 1
ndims = 1
shape = (len(bins)-1,)
# Create or validate the data shape
if data is None:
# data = numpy.zeros(tuple(x-1 for x in bins.shape), **kwargs)
data = numpy.zeros(shape, **kwargs)
else:
data = numpy.asarray(data, **kwargs)
# Same dimensions and shape-1
assert ndims == data.ndim
if ndims == 1:
assert all(x == len(y)-1 for x, y in zip(data.shape, [bins]))
else:
assert all(x == len(y)-1 for x, y in zip(data.shape, bins))
# Cast from our data array
obj = data.view(cls)
obj._bins = bins
return obj
def __array_finalize__(self, obj):
# Since always creating as an alternate, this should never happen
assert obj is not None
# Other should always have a _bins object
self._bins = getattr(obj,"_bins",None)
def __array_wrap__(self,obj,context=None):
# if obj.ndim == 0 and obj.size == 1:
# return obj.item()
# Don't wrap as a hist if the shape changed - we have no idea how it did so
if not obj.shape == self.shape:
return obj
return super(Hist,self).__array_wrap__(obj,context)
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, value):
value = numpy.asarray(value)
assert value.ndim == self.ndim
assert all(x == y-1 for x, y in zip(self.shape, value.shape))
self._bins = value
def __getitem__(self, index):
"""Return a value, or a subhist from a slice.
Getting singular indices just returns the values, whilst slices return
subhists, with applicable bins."""
    if isinstance(index, slice):
      # accept bare slices too (Python 3 does not route them via __getslice__)
      index = (index,)
    if isinstance(index, tuple) and self.ndim == 1:
binSel = []
# Build a new tuple for each of the entries
for selection in index:
if selection is Ellipsis:
binSel.append(Ellipsis)
elif isinstance(selection, slice):
# Stepping really doesn't make much sense with bins
assert selection.step is None or selection.step == 1
if selection.stop is not None:
          binSel.append(slice(selection.start, min(sys.maxsize, selection.stop+1)))
else:
binSel.append(slice(selection.start, None))
elif isinstance(selection, int):
binSel.append(slice(selection, selection+1))
else:
# Throw away the hist information as we don't understand the request
return super(Hist, self).__getitem__(index).view(numpy.ndarray)
#assert False
# Build a new histogram with these bins
ret = super(Hist,self).__getitem__(index).view(Hist)
# If this gave us a hist..
if hasattr(ret, "_bins"):
ret._bins = self._bins.__getitem__(tuple(binSel))
return ret
else:
return super(Hist, self).__getitem__(index)
def __getslice__(self, i, j):
return self.__getitem__((slice(i,j),))
def __repr__(self):
# if numpy.all(self == 0):
# # Bin-only output
# return "{}(bins={})".format(type(self).__name__, numpy.array_repr(self._bins))
# else:
if self.ndim == 1:
return "{}({}, data={})".format(type(self).__name__,
numpy.array_repr(self._bins)[len("array("):-1],
numpy.array_repr(self)[len(type(self).__name__)+1:-1])
else:
return "{}(({}), data={})".format(type(self).__name__,
",".join([numpy.array_repr(x)[6:-1] for x in self._bins]),
numpy.array_repr(self)[len(type(self).__name__)+1:-1])
def fill(self, values, weights=None):
values = numpy.asarray(values)
if weights is not None:
weights = numpy.asarray(weights)
else:
weights = numpy.ones(values.shape)
assert values.shape == weights.shape
# Promote scalars, if required
if values.ndim == 0:
values = values[numpy.newaxis]
weights = weights[numpy.newaxis]
bins = numpy.digitize(values, self._bins)
newValues = numpy.zeros(self.shape)
# Now fill all the bins
for _bin, weight in zip(bins, weights):
if _bin < 1 or _bin > len(newValues):
continue
newValues[_bin-1] += weight
# add to the current instance
self += newValues
def draw_hist(self, **kwargs):
assert self.ndim == 1
import matplotlib.pyplot as plt
x = numpy.zeros(len(self)*2)
x[0::2] = self.bins[:-1]
x[1::2] = self.bins[1:]
y = numpy.array(numpy.repeat(self,2))
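    # e.g. bins [0, 1, 2] with data [a, b] give x = [0, 1, 1, 2] and
    # y = [a, a, b, b], i.e. the usual histogram step outline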
return plt.plot(x,y,**kwargs)
def pcolor(self, *args, **kwargs):
assert self.ndim == 2
import matplotlib.pyplot as plt
plt.pcolor(self.bins[0], self.bins[1], self.T, *args, **kwargs)
def pcolormesh(self, *args, **kwargs):
assert self.ndim == 2
import matplotlib.pyplot as plt
    plt.pcolormesh(self.bins[0], self.bins[1], self.T, *args, **kwargs) | mit |
ch3ll0v3k/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
    description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
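# Example usage (sketch; hypothetical folder layout with 'neg'/'pos' subfolders):
#   bunch = load_files('/data/reviews', encoding='utf-8')
#   bunch.target_names   # ['neg', 'pos'], taken from the subfolder names
#   assert len(bunch.data) == len(bunch.target)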
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans.py | 3 | 10875 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of learn (aka skflow) API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# TODO(agarwal,ands): support sharded input.
# TODO(agarwal,ands): enable stopping criteria based on improvements to cost.
# TODO(agarwal,ands): support random restarts.
class KMeansClustering(estimator.Estimator,
TransformerMixin):
"""K-Means clustering."""
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=clustering_ops.RANDOM_INIT,
distance_metric=clustering_ops.SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
kmeans_plus_plus_num_retries=2,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
config: See Estimator
"""
super(KMeansClustering, self).__init__(
model_dir=model_dir,
config=config)
self.kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._num_clusters = num_clusters
self._training_initial_clusters = initial_clusters
self._training_graph = None
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._random_seed = random_seed
self._initialized = False
# pylint: disable=protected-access
class _StopWhenConverged(BaseMonitor):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes a '_StopWhenConverged' monitor.
Args:
tolerance: A relative tolerance of change between iterations.
"""
super(KMeansClustering._StopWhenConverged, self).__init__()
self._tolerance = tolerance
def begin(self, max_steps):
super(KMeansClustering._StopWhenConverged, self).begin(max_steps)
self._prev_loss = None
def step_begin(self, step):
super(KMeansClustering._StopWhenConverged, self).step_begin(step)
return [self._estimator._loss]
def step_end(self, step, output):
super(KMeansClustering._StopWhenConverged, self).step_end(step, output)
loss = output[self._estimator._loss]
if self._prev_loss is None:
self._prev_loss = loss
return False
relative_change = (abs(loss - self._prev_loss)
/ (1 + abs(self._prev_loss)))
self._prev_loss = loss
return relative_change < self._tolerance
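      # e.g. a loss change from 100.0 to 99.5 gives |99.5 - 100.0| / (1 + 100.0)
      # ~= 0.005, so relative_tolerance=0.01 would stop training at that step.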
# pylint: enable=protected-access
def fit(self, x, y=None, monitors=None, logdir=None, steps=None, batch_size=128,
relative_tolerance=None):
"""Trains a k-means clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: Monitor object to print training progress and invoke early
stopping
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
batch_size: mini-batch size to use. Requires `use_mini_batch=True`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
Returns:
Returns self.
"""
assert y is None
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, batch_size if self._use_mini_batch else None)
if relative_tolerance is not None:
if monitors is not None:
monitors += [self._StopWhenConverged(relative_tolerance)]
else:
monitors = [self._StopWhenConverged(relative_tolerance)]
# Make sure that we will eventually terminate.
assert ((monitors is not None and len(monitors)) or (steps is not None)
or (self.steps is not None))
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[KMeansClustering.CLUSTER_IDX] for prediction in
super(KMeansClustering, self).predict(
x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(x=x, batch_size=batch_size)[KMeansClustering.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[KMeansClustering.ALL_SCORES] for prediction in
super(KMeansClustering, self).predict(
x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
return tf.contrib.framework.load_variable(self.model_dir, self.CLUSTERS)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = clustering_ops.KMeans(
self._parse_tensor_or_dict(features),
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
self._loss = tf.reduce_sum(losses)
tf.contrib.deprecated.scalar_summary('loss/raw', self._loss)
training_op = with_dependencies([training_op, incr_step], self._loss)
return training_op, self._loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = clustering_ops.KMeans(
self._parse_tensor_or_dict(features),
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
return {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0]
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = clustering_ops.KMeans(
self._parse_tensor_or_dict(features),
self._num_clusters,
self._training_initial_clusters,
self._distance_metric,
self._use_mini_batch,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
).training_graph()
return {
KMeansClustering.SCORES: tf.reduce_sum(losses),
}
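# Example usage (sketch; `points` is a hypothetical [n_samples, n_features] array):
#   kmeans = KMeansClustering(num_clusters=3, use_mini_batch=False)
#   kmeans.fit(points, steps=10, relative_tolerance=1e-4)
#   assignments = kmeans.predict(points)   # one cluster id per row of `points`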
| apache-2.0 |
drscotthawley/SHAART | source/pwrspecwidget.py | 1 | 5123 | # Python Qt5 bindings for GUI objects
from PyQt5 import QtGui, QtWidgets
# import the Qt5Agg FigureCanvas object, that binds Figure to
# Qt5Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt5agg \
import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
#from matplotlib.backends.backend_qt5agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import scipy.signal as signal
import scipy
from os import path
import re
def my_resample(x,y,newnum):
num = len(x)
  stride = num // newnum
x2 = np.zeros(newnum)
y2 = np.zeros(newnum)
i = 0
for i2 in range(0,newnum):
x2[i2] = x[i]
y2[i2] = y[i]
i = i+stride
return x2, y2
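# e.g. my_resample(x, y, 4) on 1000 input samples keeps every 250th point;
# this is plain strided decimation with no anti-alias filtering (sketch).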
class PwrSpecCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
        self.fig.clear()
self.ax = self.fig.add_subplot(111)
self.fig.subplots_adjust(left=0.09,right=0.98,bottom=0.13, top=.97)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
class PwrSpecWidget(QtWidgets.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtWidgets.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = PwrSpecCanvas()
# create a vertical box layout
self.vbl = QtWidgets.QVBoxLayout()
# add pwrspec widget to vertical box
self.vbl.addWidget(self.canvas)
# add interactive navigation
self.navi_toolbar = NavigationToolbar(self.canvas, self)
self.vbl.addWidget(self.navi_toolbar)
# set the layout to th vertical box
self.setLayout(self.vbl)
def legend_string(self,instr):
instr = "%s" % instr # just to make sure it's of the right 'type'
outstr = path.basename(instr)
match = re.match(r"(.*)\.wav",outstr) # take out the .wav if possible
if (match is not None):
outstr = match.group(1)
return outstr
def draw_graph(self,amp,samplerate,color="red"):
"""Updates the graph with new data/annotations"""
print("pwrspec: Computing dB, amplength = ",len(amp))
#fftlength = 16384
#dB = 20.0*np.log10(np.abs(np.fft.rfft(amp,n=fftlength)))
# dB = 20 * scipy.log10(scipy.absolute(scipy.fft(amp)))
dB = 20.0*np.log10(np.abs(np.fft.rfft(amp))) # this works the best
print("pwrspec: finished Computing dB")
graphsamples = len(dB)
print("pwrspec computing f, graphsamples = ",graphsamples)
f = np.linspace(0, samplerate/2.0, graphsamples)
print("pwrspec finished computing f")
if (graphsamples > 100000):
graphsamples = 4096
elif (graphsamples > 50000):
graphsamples = 2048
else:
graphsamples = 1024
        graphsamples = len(dB)  # TODO: remove this override; it disables the downsampling selected above
# ds_dB,ds_f = signal.resample(dB,graphsamples,f)
ds_f,ds_dB = my_resample(f,dB,graphsamples)
maxval = np.max(ds_dB)
ds_dB = [ x - maxval for x in ds_dB]
minval = np.min(ds_dB)
# Set up the plot
p = self.canvas.ax.plot(ds_f, ds_dB,color=color,lw=1)
self.canvas.ax.grid(True)
self.canvas.ax.axis([10,ds_f[-1],minval,0])
self.canvas.ax.set_xscale("log", nonposx='clip')
# Annotation
self.canvas.ax.set_xlabel('Frequency (Hz)')
self.canvas.ax.set_ylabel('Power (dB) ')
print("leaving draw_graph")
return p
def update_graph(self,amp,samplerate,filenameA,ampB,samplerateB,filenameB):
print("starting update_graph")
self.canvas.ax.clear()
p1, = self.draw_graph(amp,samplerate,"blue")
leg_fA = self.legend_string(filenameA)
if (ampB is not None) & (len(ampB) > 1):
p2, = self.draw_graph(ampB,samplerateB,"purple")
leg_fB = self.legend_string(filenameB)
l1 = self.canvas.ax.legend([p1,p2], [leg_fA,leg_fB], loc=3)
else:
l1 = self.canvas.ax.legend([p1], [leg_fA], loc=3)
l1.draw_frame(False) # no box around the legend
# Actually draw everything
self.canvas.draw()
print("leaving update_graph")
| gpl-2.0 |
plissonf/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
kaushik94/tardis | tardis/io/util.py | 1 | 11543 | # Utility functions for the IO part of TARDIS
import os
import re
import logging
import pandas as pd
import numpy as np
import collections
from collections import OrderedDict
import requests
import yaml
from tqdm.auto import tqdm
from tardis import constants
from astropy import units as u
from tardis import __path__ as TARDIS_PATH
logger = logging.getLogger(__name__)
def get_internal_data_path(fname):
"""
Get internal data path of TARDIS
Returns
-------
data_path: str
internal data path of TARDIS
"""
return os.path.join(TARDIS_PATH[0], "data", fname)
def quantity_from_str(text):
"""
Convert a string to `astropy.units.Quantity`
Parameters
----------
text:
The string to convert to `astropy.units.Quantity`
Returns
-------
`astropy.units.Quantity`
"""
value_str, unit_str = text.split(None, 1)
value = float(value_str)
if unit_str.strip() == "log_lsun":
value = 10 ** (value + np.log10(constants.L_sun.cgs.value))
unit_str = "erg/s"
unit = u.Unit(unit_str)
if unit == u.L_sun:
return value * constants.L_sun
return u.Quantity(value, unit_str)
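# A minimal usage sketch (illustrative values, not from the original module):
#   >>> quantity_from_str("1.5 km")
#   <Quantity 1.5 km>
#   >>> quantity_from_str("9.44 log_lsun")   # 10**9.44 solar luminosities
#   <Quantity 1.05e+43 erg / s>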
class MockRegexPattern(object):
"""
A mock class to be used in place of a compiled regular expression
when a type check is needed instead of a regex match.
Note: This is usually a lot slower than regex matching.
"""
def __init__(self, target_type):
self.type = target_type
def match(self, text):
"""
Parameters
----------
text:
A string to be passed to `target_type` for conversion.
Returns
-------
`True` if `text` can be converted to `target_type`.
"""
try:
self.type(text)
except ValueError:
return False
return True
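# Sketch of the behaviour relied on by the implicit resolvers below:
#   MockRegexPattern(float).match("1e-3") -> True ("1e-3" converts cleanly)
#   MockRegexPattern(float).match("abc") -> False (the ValueError is caught)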
class YAMLLoader(yaml.Loader):
"""
A custom YAML loader containing all the constructors required
to properly parse the tardis configuration.
"""
def construct_quantity(self, node):
"""
A constructor for converting quantity-like YAML nodes to
`astropy.units.Quantity` objects.
Parameters
----------
node:
The YAML node to be constructed
Returns
-------
`astropy.units.Quantity`
"""
data = self.construct_scalar(node)
return quantity_from_str(data)
def mapping_constructor(self, node):
return OrderedDict(self.construct_pairs(node))
YAMLLoader.add_constructor("!quantity", YAMLLoader.construct_quantity)
YAMLLoader.add_implicit_resolver(
"!quantity", MockRegexPattern(quantity_from_str), None
)
YAMLLoader.add_implicit_resolver(
"tag:yaml.org,2002:float", MockRegexPattern(float), None
)
YAMLLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
YAMLLoader.mapping_constructor,
)
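# Taken together, these resolvers mean a config snippet such as
#   luminosity: 9.44 log_lsun
#   time_explosion: 13 day
# parsed with YAMLLoader yields astropy Quantities for both values, because
# the "!quantity" implicit resolver matches any scalar that quantity_from_str
# can convert (a sketch of the intent, not text from the original file).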
def yaml_load_file(filename, loader=yaml.Loader):
with open(filename) as stream:
return yaml.load(stream, Loader=loader)
def yaml_load_config_file(filename):
return yaml_load_file(filename, YAMLLoader)
def traverse_configs(base, other, func, *args):
"""
Recursively traverse a base dict or list along with another one
calling `func` for leafs of both objects.
Parameters
----------
base:
The object on which the traversing is done
other:
The object which is traversed along with `base`
func:
A function called for each leaf of `base` and the corresponding leaf of `other`
Signature: `func(item1, item2, *args)`
args:
Arguments passed into `func`
"""
if isinstance(base, collections.abc.Mapping):
for k in base:
traverse_configs(base[k], other[k], func, *args)
elif (
isinstance(base, collections.abc.Iterable)
and not isinstance(base, str)
and not hasattr(base, "shape")
):
for val1, val2 in zip(base, other):
traverse_configs(val1, val2, func, *args)
else:
func(base, other, *args)
def assert_equality(item1, item2):
assert type(item1) is type(item2)
try:
if hasattr(item1, "unit"):
assert item1.unit == item2.unit
assert np.allclose(item1, item2, atol=0.0)
except (ValueError, TypeError):
assert item1 == item2
def check_equality(item1, item2):
try:
traverse_configs(item1, item2, assert_equality)
except AssertionError:
return False
else:
return True
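# Sketch of the intended use: traverse_configs walks two nested structures in
# lockstep and applies `func` to each pair of leaves, so for example
#   check_equality({"a": [1 * u.cm, 2.0]}, {"a": [1 * u.cm, 2.0]}) -> True
#   check_equality({"a": [1 * u.cm, 2.0]}, {"a": [1 * u.m, 2.0]}) -> False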
class HDFWriterMixin(object):
def __new__(cls, *args, **kwargs):
instance = super(HDFWriterMixin, cls).__new__(cls)
instance.optional_hdf_properties = []
instance.__init__(*args, **kwargs)
return instance
@staticmethod
def to_hdf_util(path_or_buf, path, elements, complevel=9, complib="blosc"):
"""
A function to uniformly store TARDIS data
to an HDF file.
Scalars will be stored in a Series under path/scalars
1D arrays will be stored under path/property_name as distinct Series
2D arrays will be stored under path/property_name as distinct DataFrames
Units will be stored as their CGS value
Parameters
----------
path_or_buf:
Path or buffer to the HDF store
path: str
Path inside the HDF store to store the `elements`
elements: dict
A dict of property names and their values to be
stored.
Returns
-------
"""
we_opened = False
try:
buf = pd.HDFStore(path_or_buf, complevel=complevel, complib=complib)
except TypeError as e: # Already a HDFStore
if str(e) == "Expected bytes, got HDFStore":
buf = path_or_buf
else:
raise e
else: # path_or_buf was a string and we opened the HDFStore
we_opened = True
if not buf.is_open:
buf.open()
we_opened = True
scalars = {}
for key, value in elements.items():
if value is None:
value = "none"
if hasattr(value, "cgs"):
value = value.cgs.value
if np.isscalar(value):
scalars[key] = value
elif hasattr(value, "shape"):
if value.ndim == 1:
# This try,except block is only for model.plasma.levels
try:
pd.Series(value).to_hdf(buf, os.path.join(path, key))
except NotImplementedError:
pd.DataFrame(value).to_hdf(buf, os.path.join(path, key))
else:
pd.DataFrame(value).to_hdf(buf, os.path.join(path, key))
else:
try:
value.to_hdf(buf, path, name=key)
except AttributeError:
data = pd.DataFrame([value])
data.to_hdf(buf, os.path.join(path, key))
if scalars:
scalars_series = pd.Series(scalars)
# Unfortunately, with to_hdf we cannot append, so merge beforehand
scalars_path = os.path.join(path, "scalars")
try:
scalars_series = pd.concat([buf[scalars_path], scalars_series])
except KeyError: # no scalars in HDFStore
pass
scalars_series.to_hdf(buf, os.path.join(path, "scalars"))
if we_opened:
buf.close()
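# Illustrative layout (a sketch; the names are made up): calling
#   to_hdf_util("out.h5", "model", {"t_inner": 1.1e4 * u.K, "n_shells": 20,
#                                   "w": np.ones(20)})
# stores n_shells and the CGS value of t_inner in the Series "model/scalars"
# and the 1D array w as its own Series under "model/w".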
def get_properties(self):
data = {name: getattr(self, name) for name in self.full_hdf_properties}
return data
@property
def full_hdf_properties(self):
# If tardis was compiled --with-vpacket-logging, add vpacket properties
if hasattr(self, "virt_logging") and self.virt_logging == 1:
self.hdf_properties.extend(self.vpacket_hdf_properties)
return self.optional_hdf_properties + self.hdf_properties
@staticmethod
def convert_to_snake_case(s):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
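# For example (illustrative): "HDFWriterMixin" -> "hdf_writer_mixin" and
# "PlasmaWriterMixin" -> "plasma_writer_mixin"; this is what names the HDF
# group in to_hdf when a class defines no explicit hdf_name.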
def to_hdf(self, file_path, path="", name=None):
"""
Parameters
----------
file_path: str
Path or buffer to the HDF store
path: str
Path inside the HDF store to store the `elements`
name: str
Group inside the HDF store to which the `elements` need to be saved
Returns
-------
"""
if name is None:
try:
name = self.hdf_name
except AttributeError:
name = self.convert_to_snake_case(self.__class__.__name__)
data = self.get_properties()
buff_path = os.path.join(path, name)
self.to_hdf_util(file_path, buff_path, data)
class PlasmaWriterMixin(HDFWriterMixin):
def get_properties(self):
data = {}
if self.collection:
properties = [
name
for name in self.plasma_properties
if isinstance(name, tuple(self.collection))
]
else:
properties = self.plasma_properties
for prop in properties:
for output in prop.outputs:
data[output] = getattr(prop, output)
data["atom_data_uuid"] = self.atomic_data.uuid1
if "atomic_data" in data:
data.pop("atomic_data")
if "nlte_data" in data:
logger.warning("nlte_data can't be saved")
data.pop("nlte_data")
return data
def to_hdf(self, file_path, path="", name=None, collection=None):
"""
Parameters
----------
file_path: str
Path or buffer to the HDF store
path: str
Path inside the HDF store to store the `elements`
name: str
Group inside the HDF store to which the `elements` need to be saved
collection:
`None` or a `PlasmaPropertyCollection` of which members are
the property types which will be stored. If `None` then
all types of properties will be stored.
This acts like a filter, for example if a value of
`property_collections.basic_inputs` is given, only
those input parameters will be stored to the HDF store.
Returns
-------
"""
self.collection = collection
super(PlasmaWriterMixin, self).to_hdf(file_path, path, name)
def download_from_url(url, dst):
"""
adapted from https://gist.github.com/wy193777/0e2a4932e81afc6aa4c8f7a2984f34e2
@param: url to download file
@param: dst place to put the file
"""
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size,
initial=first_byte,
unit="B",
unit_scale=True,
desc=url.split("/")[-1],
)
req = requests.get(url, headers=header, stream=True)
with open(dst, "ab") as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
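# Minimal usage sketch (URL and destination are hypothetical):
#   download_from_url("https://example.org/atom_data.h5", "/tmp/atom_data.h5")
# resumes any partial file at `dst` via an HTTP Range request, streams the
# rest in 1 KiB chunks with a tqdm progress bar, and returns the file size.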
| bsd-3-clause |
tomlof/scikit-learn | sklearn/utils/tests/test_extmath.py | 19 | 24513 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
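# logsumexp computes log(sum_i(exp(x_i))) via the shift-by-max identity
#   log(sum_i(exp(x_i))) = m + log(sum_i(exp(x_i - m))), with m = max_i(x_i),
# which keeps the sum stable even when individual exp(x_i) would under- or
# overflow; here it must recover x.sum() = 1e6 * 1e-40 = 1e-34.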
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
Xcsr = sparse.csr_matrix(X, dtype=dtype)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
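# With the data constructed below (half the samples near 1e8), exp_x2 is of
# order 5e15, where one float64 ulp is already ~1; the squared-value sum and
# the final subtraction therefore lose enough precision to fail the check.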
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
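# This is the classic one-sample update, written with
# samples_ratio = (n - 1) / n:
#   mean_n = mean_{n-1} + (x - mean_{n-1}) / n
#   var_n = ((n - 1) * var_{n-1} + (x - mean_{n-1}) * (x - mean_n)) / n
# It is "naive" because it feeds one sample at a time instead of a batch.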
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| bsd-3-clause |
zak-k/cartopy | lib/cartopy/tests/mpl/test_set_extent.py | 3 | 6485 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import tempfile
from matplotlib.testing.decorators import cleanup
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import cartopy.crs as ccrs
@cleanup
def test_extents():
# tests that one can set the extents of a map in a variety of coordinate
# systems, for a variety of projection
uk = [-12.5, 4, 49, 60]
uk_crs = ccrs.Geodetic()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(uk, crs=uk_crs)
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-12.5, 49.], [4., 60.]]))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(uk, crs=uk_crs)
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-1034046.22566261, -4765889.76601514],
[333263.47741164, -3345219.0594531]])
)
# given that we know the PolarStereo coordinates of the UK, try using
# those in a PlateCarree plot
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([-1034046, 333263, -4765889, -3345219],
crs=ccrs.NorthPolarStereo())
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-17.17698577, 48.21879707],
[5.68924381, 60.54218893]])
)
@cleanup
def test_domain_extents():
# Setting the extent to global or the domain limits.
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent((-180, 180, -90, 90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax.set_extent((-180, 180, -90, 90), ccrs.PlateCarree())
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax = plt.axes(projection=ccrs.PlateCarree(90))
ax.set_extent((-180, 180, -90, 90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax.set_extent((-180, 180, -90, 90), ccrs.PlateCarree(90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax = plt.axes(projection=ccrs.OSGB())
ax.set_extent((0, 7e5, 0, 13e5), ccrs.OSGB())
assert_array_equal(ax.viewLim.get_points(), [[0, 0], [7e5, 13e5]])
def test_update_lim():
# check that the standard data lim setting works
ax = plt.axes(projection=ccrs.PlateCarree())
ax.update_datalim([(-10, -10), (-5, -5)])
assert_array_almost_equal(ax.dataLim.get_points(),
np.array([[-10., -10.], [-5., -5.]]))
plt.close()
def test_limits_contour():
xs, ys = np.meshgrid(np.linspace(250, 350, 15), np.linspace(-45, 45, 20))
data = np.sin((xs * ys) * 1.e7)
resulting_extent = np.array([[250 - 180, -45.], [-10. + 180, 45.]])
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.contourf(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.contour(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
def test_limits_pcolor():
xs, ys = np.meshgrid(np.linspace(250, 350, 15), np.linspace(-45, 45, 20))
data = (np.sin((xs * ys) * 1.e7))[:-1, :-1]
resulting_extent = np.array([[250 - 180, -45.], [-10. + 180, 45.]])
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.pcolor(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.pcolormesh(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
def test_view_lim_autoscaling():
x = np.linspace(0.12910209, 0.42141822)
y = np.linspace(0.03739792, 0.33029076)
x, y = np.meshgrid(x, y)
ax = plt.axes(projection=ccrs.RotatedPole(37.5, 357.5))
plt.scatter(x, y, x * y, transform=ccrs.PlateCarree())
expected = np.array([[86.12433701, 52.51570463],
[86.69696603, 52.86372057]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(), expected,
decimal=2)
plt.draw()
assert_array_almost_equal(ax.viewLim.frozen().get_points(), expected,
decimal=2)
ax.autoscale_view(tight=False)
expected_non_tight = np.array([[86, 52.45], [86.8, 52.9]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
expected_non_tight, decimal=1)
plt.close()
def test_view_lim_default_global():
ax = plt.axes(projection=ccrs.PlateCarree())
# The view lim should be the default unit bbox until it is drawn.
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
[[0, 0], [1, 1]])
with tempfile.TemporaryFile() as tmp:
plt.savefig(tmp)
expected = np.array([[-180, -90], [180, 90]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
expected)
plt.close()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
Clyde-fare/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
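# For reference, chi2 reduces per feature to the classic statistic
#   chi2 = sum_c((O_c - E_c)**2 / E_c)
# where O_c is the feature's summed value in class c and E_c is the value
# expected if the feature were independent of y.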
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
ual/urbansim | urbansim/tests/test_accounts.py | 5 | 2349 | import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import accounts
@pytest.fixture(scope='module')
def acc_name():
return 'test'
@pytest.fixture(scope='module')
def acc_bal():
return 1000
@pytest.fixture
def acc(acc_name, acc_bal):
return accounts.Account(acc_name, acc_bal)
def test_init(acc, acc_name):
assert acc.name == acc_name
assert acc.balance == 1000
assert acc.transactions == []
def test_add_transaction(acc, acc_bal):
amount = -50
subaccount = ('a', 'b', 'c')
metadata = {'for': 'light speed engine'}
acc.add_transaction(amount, subaccount, metadata)
assert len(acc.transactions) == 1
assert acc.balance == acc_bal + amount
t = acc.transactions[-1]
assert isinstance(t, accounts.Transaction)
assert t.amount == amount
assert t.subaccount == subaccount
assert t.metadata == metadata
def test_add_transactions(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
t3 = (-100, ('a', 'b', 'c'), 'Acme Corp.')
t4 = (42, None, None)
acc.add_transactions((t1, t2, t3, t4))
assert len(acc.transactions) == 4
assert acc.balance == acc_bal + t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions() == t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions_by_subacct(('a', 'b', 'c')) == t1[0] + t3[0]
assert acc.total_transactions_by_subacct(None) == t2[0] + t4[0]
assert list(acc.all_subaccounts()) == [('a', 'b', 'c'), None]
assert list(acc.iter_subaccounts()) == [
(('a', 'b', 'c'), t1[0] + t3[0]),
(None, t2[0] + t4[0])]
def test_column_names_from_metadata():
cnfm = accounts._column_names_from_metadata
assert cnfm([]) == []
assert cnfm([{'a': 1, 'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1}, {'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == ['a', 'b']
def test_to_frame(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
acc.add_transactions((t1, t2))
expected = pd.DataFrame(
[[200, ('a', 'b', 'c'), None],
[-50, None, 'Acme Corp.']],
columns=['amount', 'subaccount', 'to'])
df = acc.to_frame()
pdt.assert_frame_equal(df, expected)
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/mixture/plot_gmm_covariances.py | 11 | 4723 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
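# Note on the geometry above (a sketch of the reasoning, not original text):
# eigh returns the covariance's eigenvalues v and eigenvectors w; the
# eigenvectors set the rotation angle of the ellipse, and each full axis
# length is 2*sqrt(2)*sqrt(v), i.e. 2*sqrt(2) per-axis standard deviations.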
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
vigilv/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False: # disabled branch; outlier relabeling is covered by the next test
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
    y = ((X ** 2).sum(axis=1) < .5).astype(int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius-based neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .25).astype(int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit nearest-neighbor classifiers and a regressor on the full dataset
    # and check that they (almost) reproduce the training labels.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if cls in (neighbors.KNeighborsClassifier,
                   neighbors.KNeighborsRegressor):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
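        # Minkowski's 'p' is passed through the dedicated keyword argument
        # below; leaving it inside metric_params as well would be ambiguous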
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()  # sort in place; np.sort alone discards its result
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
phobson/statsmodels | statsmodels/tsa/arima_process.py | 4 | 30886 | '''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values; standard errors appear in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
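  (standard MA(1) reference case: for y_t = e_t + b*e_{t-1} the
   autocovariances are acovf(0) = sigma**2*(1 + b**2), acovf(1) = sigma**2*b
   and acovf(k) = 0 for k > 1)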
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
burnin=0):
"""
Generate a random sample of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nsample : int
length of simulated time series
sigma : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations at the
beginning of the sample are dropped
Returns
-------
sample : array
sample of ARMA process given by ar, ma of length nsample
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
conventions used in signal processing used in signal.lfilter vs.
    conventions in statistics for ARMA processes, the AR parameters should
have the opposite sign of what you might expect. See the examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> ar = np.r_[1, -arparams] # add zero-lag and negate
>>> ma = np.r_[1, maparams] # add zero-lag
>>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
#TODO: unify with ArmaProcess method
eta = sigma * distrvs(nsample+burnin)
return signal.lfilter(ma, ar, eta)[burnin:]
def arma_acovf(ar, ma, nobs=10):
'''theoretical autocovariance function of ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acovf
Returns
-------
acovf : array
autocovariance of ARMA process given by ar, ma
See Also
--------
arma_acf
acovf
Notes
-----
Tries to do some crude numerical speed improvements for cases
    with high persistence. However, this algorithm is slow if the process is
highly persistent and only a few autocovariances are desired.
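    Both computation branches evaluate the MA(infinity) identity (with
    sigma = 1)
        acovf[k] = sum_j psi[j] * psi[j + k]
    where psi is the impulse response returned by arma_impulse_response;
    np.correlate(ir, ir, 'full') yields exactly these sums at nonnegative
    lags.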
'''
#increase length of impulse response for AR closer to 1
#maybe cheap/fast enough to always keep nobs for ir large
if np.abs(np.sum(ar)-1) > 0.9:
nobs_ir = max(1000, 2 * nobs) # no idea right now how large is needed
else:
nobs_ir = max(100, 2 * nobs) # no idea right now
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #better safe than sorry (?), I have no idea about the required precision
#only checked for AR(1)
    while np.abs(ir[-1]) > 5*1e-5:
nobs_ir *= 10
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#again no idea where the speed break points are:
if nobs_ir > 50000 and nobs < 1001:
acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs])
for t in range(nobs)])
else:
acovf = np.correlate(ir, ir, 'full')[len(ir)-1:]
return acovf[:nobs]
def arma_acf(ar, ma, nobs=10):
'''theoretical autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acf
Returns
-------
acf : array
autocorrelation of ARMA process given by ar, ma
See Also
--------
arma_acovf
acf
acovf
'''
acovf = arma_acovf(ar, ma, nobs)
return acovf/acovf[0]
def arma_pacf(ar, ma, nobs=10):
'''partial autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned pacf
Returns
-------
pacf : array
partial autocorrelation of ARMA process given by ar, ma
Notes
-----
solves yule-walker equation for each lag order up to nobs lags
not tested/checked yet
'''
apacf = np.zeros(nobs)
acov = arma_acf(ar, ma, nobs=nobs+1)
apacf[0] = 1.
for k in range(2, nobs+1):
r = acov[:k]
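        # Yule-Walker at order k-1: solve Toeplitz(acf[:k-1]) * phi = acf[1:k];
        # the last element of phi is the partial autocorrelation at lag k-1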
apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
return apacf
def arma_periodogram(ar, ma, worN=None, whole=0):
'''periodogram for ARMA process given by lag-polynomials ar and ma
Parameters
----------
ar : array_like
autoregressive lag-polynomial with leading 1 and lhs sign
ma : array_like
moving average lag-polynomial with leading 1
worN : {None, int}, optional
option for scipy.signal.freqz (read "w or N")
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : {0,1}, optional
options for scipy.signal.freqz
Normally, frequencies are computed from 0 to pi (upper-half of
unit-circle. If whole is non-zero compute frequencies from 0 to 2*pi.
Returns
-------
w : array
frequencies
sd : array
periodogram, spectral density
Notes
-----
Normalization ?
This uses signal.freqz, which does not use fft. There is a fft version
somewhere.
'''
w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
        # this happens with unit root or seasonal unit root
print('Warning: nan in frequency response h, maybe a unit root')
return w, sd
def arma_impulse_response(ar, ma, nobs=100):
'''get the impulse response function (MA representation) for ARMA process
Parameters
----------
ma : array_like, 1d
moving average lag polynomial
ar : array_like, 1d
auto regressive lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ir : array, 1d
impulse response function with nobs elements
Notes
-----
This is the same as finding the MA representation of an ARMA(p,q).
By reversing the role of ar and ma in the function arguments, the
returned result is the AR representation of an ARMA(p,q), i.e
ma_representation = arma_impulse_response(ar, ma, nobs=100)
ar_representation = arma_impulse_response(ma, ar, nobs=100)
fully tested against matlab
Examples
--------
AR(1)
>>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
this is the same as
>>> 0.8**np.arange(10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
MA(2)
>>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
array([ 1. , 0.5, 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
ARMA(1,2)
>>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
array([ 1. , 1.3 , 1.24 , 0.992 , 0.7936 ,
0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685])
'''
impulse = np.zeros(nobs)
impulse[0] = 1.
return signal.lfilter(ma, ar, impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
'''get the AR representation of an ARMA process
Parameters
----------
ar : array_like, 1d
auto regressive lag polynomial
ma : array_like, 1d
moving average lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ar : array, 1d
coefficients of AR lag polynomial with nobs elements
Notes
-----
This is just an alias for
``ar_representation = arma_impulse_response(ma, ar, nobs=100)`` which has
been fully tested against MATLAB.
Examples
--------
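    A hand-checkable ARMA(1,1) inverted to its (truncated) AR representation;
    the values below were worked out analytically, and the exact array
    formatting may vary with the numpy version:
    >>> arma2ar([1., -0.8], [1., 0.5], nobs=5)
    array([ 1.    , -1.3   ,  0.65  , -0.325 ,  0.1625])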
'''
return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
'''find arma approximation to ar process
This finds the ARMA(p,q) coefficients that minimize the integrated
squared difference between the impulse_response functions
(MA representation) of the AR and the ARMA process. This does
currently not check whether the MA lagpolynomial of the ARMA
process is invertible, neither does it check the roots of the AR
lagpolynomial.
Parameters
----------
ar_des : array_like
coefficients of original AR lag polynomial, including lag zero
p, q : int
length of desired ARMA lag polynomials
n : int
        number of terms of the impulse_response function to include in the
objective function for the approximation
mse : string, 'ar'
        not used yet
Returns
-------
ar_app, ma_app : arrays
coefficients of the AR and MA lag polynomials of the approximation
res : tuple
result of optimize.leastsq
Notes
-----
Extension is possible if we want to match autocovariance instead
of impulse response function.
    TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots inside the unit circle to ones that are outside. How do we do
    this?
'''
#p,q = pq
def msear_err(arma, ar_des):
ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
ar_approx = arma_impulse_response(ma, ar, n)
## print(ar,ma)
## print(ar_des.shape, ar_approx.shape)
## print(ar_des)
## print(ar_approx)
return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum()
if start is None:
arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)]
else:
arma0 = start
res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
#print(res)
arma_app = np.atleast_1d(res[0])
    ar_app = np.r_[1, arma_app[:p-1]]
ma_app = np.r_[1, arma_app[p-1:]]
return ar_app, ma_app, res
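# Illustrative use of ar2arma (a sketch; the fitted coefficients depend on
# the optimizer, the starting values and the truncation length n):
#     ar_des = lpol_fiar(0.4, n=20)            # AR form of (1-L)**0.4
#     ar_app, ma_app, res = ar2arma(ar_des, p=2, q=2, n=20)
# approximates the AR representation of fractional integration by the
# impulse response of an ARMA(2,2) lag-polynomial pair.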
def lpol2index(ar):
'''remove zeros from lagpolynomial, squeezed representation with index
Parameters
----------
ar : array_like
coefficients of lag polynomial
Returns
-------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
'''
ar = np.asarray(ar)
index = np.nonzero(ar)[0]
coeffs = ar[index]
return coeffs, index
def index2lpol(coeffs, index):
'''expand coefficients to lag poly
Parameters
----------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
ar : array_like
coefficients of lag polynomial
Returns
-------
ar : array_like
coefficients of lag polynomial
'''
n = max(index)
ar = np.zeros(n)
ar[index] = coeffs
return ar
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
'''MA representation of fractional integration
    .. math:: (1-L)^{-d} \quad \text{for } |d| < 0.5 \text{ or } |d| < 1 \ (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ma : array
coefficients of lag polynomial
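    Notes
    -----
    The coefficients are the binomial-expansion weights
        psi_j = Gamma(d + j) / (Gamma(j + 1) * Gamma(d)),
    evaluated in log space with gammaln for numerical stability.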
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
return np.exp(gammaln(d+j) - gammaln(j+1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
'''AR representation of fractional integration
    .. math:: (1-L)^{d} \quad \text{for } |d| < 0.5 \text{ or } |d| < 1 \ (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ar : array
coefficients of lag polynomial
    Notes
    -----
first coefficient is 1, negative signs except for first term,
ar(L)*x_t
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
ar = - np.exp(gammaln(-d+j) - gammaln(j+1) - gammaln(-d))
ar[0] = 1
return ar
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
'''return coefficients for seasonal difference (1-L^s)
just a trivial convenience function
Parameters
----------
s : int
number of periods in season
Returns
-------
sdiff : list, length s+1
'''
return [1] + [0]*(s-1) + [-1]
def deconvolve(num, den, n=None):
"""Deconvolves divisor out of signal, division of polynomials for n terms
calculates den^{-1} * num
Parameters
----------
num : array_like
signal or lag polynomial
    den : array_like
coefficients of lag polynomial (linear filter)
n : None or int
number of terms of quotient
Returns
-------
quot : array
quotient or filtered series
rem : array
remainder
Notes
-----
If num is a time series, then this applies the linear filter den^{-1}.
If both num and den are both lagpolynomials, then this calculates the
quotient polynomial for n terms and also returns the remainder.
This is copied from scipy.signal.signaltools and added n as optional
parameter.
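    Examples
    --------
    Dividing (1 - 0.25 L) by (1 - 0.5 L) for four terms; the quotient below
    was worked out analytically, and the array formatting may vary with the
    numpy version:
    >>> quot, rem = deconvolve([1, -0.25], [1, -0.5], n=4)
    >>> quot
    array([ 1.    ,  0.25  ,  0.125 ,  0.0625])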
"""
num = np.atleast_1d(num)
den = np.atleast_1d(den)
N = len(num)
D = len(den)
if D > N and n is None:
quot = []
rem = num
else:
if n is None:
n = N-D+1
        impulse = np.zeros(n, float)  # avoid shadowing the builtin 'input'
        impulse[0] = 1
        quot = signal.lfilter(num, den, impulse)
num_approx = signal.convolve(den, quot, mode='full')
if len(num) < len(num_approx): # 1d only ?
num = np.concatenate((num, np.zeros(len(num_approx)-len(num))))
rem = num - num_approx
return quot, rem
class ArmaProcess(object):
"""
Represent an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
Parameters
----------
ar : array_like, 1d
Coefficient for autoregressive lag polynomial, including zero lag.
See the notes for some information about the sign.
ma : array_like, 1d
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample is
generated. See example.
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
conventions used in signal processing used in signal.lfilter vs.
    conventions in statistics for ARMA processes, the AR parameters should
have the opposite sign of what you might expect. See the examples below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
>>> arma_process = sm.tsa.ArmaProcess(ar, ma)
>>> arma_process.isstationary
True
>>> arma_process.isinvertible
True
>>> y = arma_process.generate_sample(250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
# maybe needs special handling for unit roots
def __init__(self, ar, ma, nobs=100):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=100):
"""
Create ArmaProcess instance from coefficients of the lag-polynomials
Parameters
----------
arcoefs : array-like
Coefficient for autoregressive lag polynomial, not including zero
lag. The sign is inverted to conform to the usual time series
representation of an ARMA process in statistics. See the class
docstring for more information.
macoefs : array-like
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample
is generated.
"""
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
@classmethod
def from_estimation(cls, model_results, nobs=None):
"""
Create ArmaProcess instance from ARMA estimation results
Parameters
----------
model_results : ARMAResults instance
A fitted model
nobs : int, optional
If None, nobs is taken from the results
"""
arcoefs = model_results.arparams
macoefs = model_results.maparams
nobs = nobs or model_results.nobs
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
except:
print('other is not a valid type')
raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
self.ma.tolist(),
self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
    # the method name shadows the module-level arma2ma alias here, so take
    # the docstring from the function it aliases
    arma2ma.__doc__ = arma_impulse_response.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
@property
def arroots(self):
"""
Roots of autoregressive lag-polynomial
"""
return self.arpoly.roots()
@property
def maroots(self):
"""
Roots of moving average lag-polynomial
"""
return self.mapoly.roots()
@property
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
        return bool(np.all(np.abs(self.arroots) > 1))
@property
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
        return bool(np.all(np.abs(self.maroots) > 1))
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
#TODO: variable returns like this?
        pr = self.maroots
insideroots = np.abs(pr) < 1
if insideroots.any():
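            # mirror each root r inside the unit circle to 1/r: this keeps
            # the shape of the autocovariance (up to a constant scale) while
            # making the MA polynomial invertible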
pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
pnew = np.polynomial.Polynomial.fromroots(pr)
mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
burnin=0):
'''generate ARMA samples
Parameters
----------
nsample : int or tuple of ints
If nsample is an integer, then this creates a 1d timeseries of
length size. If nsample is a tuple, then the timeseries is along
            axis. All other axes have independent arma samples.
scale : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations
at the beginning of the sample are dropped
axis : int
See nsample.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not
tested yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(nsample) == 0:
nsample = [nsample]
if burnin:
            #handle burnin time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(nsample)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(nsample)
fslice = tuple([slice(None)]*np.ndim(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
if __name__ == '__main__':
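    # NOTE: the examples below use ARIMA / ARIMAProcess from the old
    # statsmodels sandbox, which are not imported in this module; they are
    # kept as historical notes and will not run as-is.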
# Simulate AR(1)
#--------------
# ar * y = ma * eta
ar = [1, -0.8]
ma = [1.0]
# generate AR data
eta = 0.1 * np.random.randn(1000)
yar1 = signal.lfilter(ar, ma, eta)
print("\nExample 0")
arest = ARIMAProcess(yar1)
rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat)
print(cov_x)
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
arest = ARIMAProcess(y1)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat1)
print(cov_x1)
err1 = arest.errfn(x=y1)
print(np.var(err1))
import statsmodels.api as sm
print(sm.regression.yule_walker(y1, order=2, inv=True))
print("\nExample 2")
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
arest2 = ARIMAProcess(y2)
rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
print(rhohat2)
print(cov_x2)
err2 = arest.errfn(x=y2)
print(np.var(err2))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
print(rhohat2a)
print(cov_x2a)
err2a = arest.errfn(x=y2)
print(np.var(err2a))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y2, order=2, inv=True))
print("\nExample 20")
nsample = 1000
ar = [1.0]#, -0.8, -0.4]
ma = [1.0, 0.5, 0.2]
y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
arest20 = ARIMAProcess(y3)
rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
print(rhohat3)
print(cov_x3)
err3 = arest20.errfn(x=y3)
print(np.var(err3))
print(np.sqrt(np.dot(err3,err3)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
print(rhohat3a)
print(cov_x3a)
err3a = arest20.errfn(x=y3)
print(np.var(err3a))
print(np.sqrt(np.dot(err3a,err3a)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y3, order=2, inv=True))
print("\nExample 02")
nsample = 1000
ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
ma = [1.0]#, 0.8, 0.4]
y4 = ARIMA.generate_sample(ar,ma,nsample)
arest02 = ARIMAProcess(y4)
rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
print(rhohat4)
print(cov_x4)
err4 = arest02.errfn(x=y4)
print(np.var(err4))
sige = np.sqrt(np.dot(err4,err4)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4)))
print(np.sqrt(np.diag(cov_x4)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
print(rhohat4a)
print(cov_x4a)
err4a = arest02.errfn(x=y4)
print(np.var(err4a))
sige = np.sqrt(np.dot(err4a,err4a)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4a)))
print(np.sqrt(np.diag(cov_x4a)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
import statsmodels.api as sm
print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
import matplotlib.pyplot as plt
plt.plot(arest2.forecast()[-100:])
#plt.show()
ar1, ar2 = ([1, -0.4], [1, 0.5])
ar2 = [1, -1]
lagpolyproduct = np.convolve(ar1, ar2)
print(deconvolve(lagpolyproduct, ar2, n=None))
print(signal.deconvolve(lagpolyproduct, ar2))
print(deconvolve(lagpolyproduct, ar2, n=10))
| bsd-3-clause |
rs2/pandas | pandas/tests/series/methods/test_argsort.py | 3 | 2248 | import numpy as np
import pytest
from pandas import Series, Timestamp, isna
import pandas._testing as tm
class TestSeriesArgsort:
def _check_accum_op(self, name, ser, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(ser).values, func(np.array(ser)), check_dtype=check_dtype
)
# with missing values
ts = ser.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH#2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
assert s.dtype == "datetime64[ns]"
shifted = s.shift(-1)
assert shifted.dtype == "datetime64[ns]"
assert isna(shifted[4])
result = s.argsort()
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(result, expected)
result = shifted.argsort()
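        # missing values (the trailing NaT created by shift) are mapped to -1
        # by Series.argsort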
expected = Series(list(range(4)) + [-1], dtype="int64")
tm.assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind="mergesort")
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind="mergesort")
qexpected = np.argsort(s.values, kind="quicksort")
tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
| bsd-3-clause |
tlmohren/Msc_thesis | Python/data_analysis_10_04_quarterbridge.py | 1 | 17173 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 05 08:30:32 2015
@author: Thomas
Analysis of quarterbridge experiments
"""
import sys,os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from thesis_functions import *
code_dir = os.getcwd()
data_dir = code_dir[:-6] + "data\\flapping_data_10_04\\"
matplotlib.rc('xtick', labelsize=30)
matplotlib.rc('ytick', labelsize=30)
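# NOTE: `font` is assumed to be provided by the star import from
# thesis_functions above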
matplotlib.rc('font', **font)
#%% --------------------------------------------------------------------------------------------
file_dict = {}
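# NOTE: the absolute path below overrides the relative data_dir built above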
data_dir = "D:/Mijn_documenten/Dropbox/ThomasMohren/data/flapping_data_10_04/"
file_dict[0] = "fd_flapping_halfbridge"
file_dict[1] = "fd_flapping_hb_xrot3_flap10"
file_dict[2] = "fd_flapping_hb_xrot5_flap10"
file_dict[3] = "fd_flapping_hb_yrot3_flap10"
file_dict[4] = "fd_flapping_quarterbridge"
file_dict[5] = "fd_flapping_qb_xrot3_flap10"
file_dict[6] = "fd_flapping_qb_xrot5_flap10"
file_dict[7] = "fd_flapping_qb_yrot3_flap10"
file_dict[8] = "fd_flapping_hb_xrot3p_flap10"
file_dict[9] = "fd_flapping_hb_yrot3p_flap10"
file_dict[10] = "fd_flapping_qb_xrot3p_flap10"
file_dict[11] = "fd_flapping_qb_yrot3p_flap10"
lincols = ['r','b','g']
add_f = [0,0.09,0.18]
#%% ----------------------------------X_const_qb_ratio find
# recurring parameters
tnew = np.arange(1,69,0.001)
fft_range = [40000,45000]
titleplot = ""
# refreshed parameters
uplim = round(NV_2_strain(500+511,"quarter"),4)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([3,10,2])
i = 0
k_range = [4,5,6]
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = datafile_analyze("A0",SD_data,tnew)
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
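    # nondimensionalize time and frequency by the measured flapping frequency
    # so spectra from different runs line up at f/f_flap = 1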
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
line = add_plot(1,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(1,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
ratio1 = np.mean(peak_mat[1,[peak_mat[1,:,0].argsort()[5:]],0])/np.mean(peak_mat[0,[peak_mat[0,:,0].argsort()[5:]],0])
ratio2 = np.mean(peak_mat[2,[peak_mat[2,:,0].argsort()[5:]],0])/np.mean(peak_mat[1,[peak_mat[1,:,0].argsort()[5:]],0])
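# ratio of the mean flapping-peak amplitude (largest 5 of the 10 analysis
# windows) between successive rotation cases; used in the compensated
# sections below to rescale the bridge signals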
plt.figure(1)
plt.subplot(212)
for i in range(3):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------X_const_compensated------------------------------------------------------------------------------
# refreshed parameters
uplim = round(NV_2_strain(50+511,"quarter"),5)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([3,10,2])
i = 0
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = (datafile_analyze("A0",SD_data,tnew) - datafile_analyze("A1",SD_data,tnew))
if (i ==1):
y = y/ratio1
elif ( i ==2):
y = y/ratio2
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [490,500,-uplim,uplim]
line = add_plot(2,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(2,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
plt.figure(2)
plt.subplot(212)
for i in range(3):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------------------Y_const_qb_ratio find
uplim = round(NV_2_strain(500+511,"quarter"),4)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([2,10,2])
i = 0
k_range = [4,7]
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = datafile_analyze("A0",SD_data,tnew)
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [210,220,-uplim,uplim]
line = add_plot(4,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(4,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
ratio = np.mean(peak_mat[1,[peak_mat[1,:,0].argsort()[5:]],0])/np.mean(peak_mat[0,[peak_mat[0,:,0].argsort()[5:]],0])
plt.figure(4)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------Y_const_compensated------------------------------------------------------------------------------
# refreshed parameters
uplim = round(NV_2_strain(50+511,"quarter"),5)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([2,10,2])
i = 0
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = (datafile_analyze("A0",SD_data,tnew) - datafile_analyze("A1",SD_data,tnew))
if (i ==1):
y = y/ratio
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [490,500,-uplim,uplim]
line = add_plot(5,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(5,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
plt.figure(5)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------------------X_periodic_qb_ratio find
# refreshed parameters
uplim = round(NV_2_strain(500+511,"quarter"),4)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([2,10,2])
i = 0
k_range = [4,10]
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = datafile_analyze("A0",SD_data,tnew)
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [550,560,-uplim,uplim]
line = add_plot(7,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(7,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
ratio = np.mean(peak_mat[1,[peak_mat[1,:,0].argsort()[5:]],0])/np.mean(peak_mat[0,[peak_mat[0,:,0].argsort()[5:]],0])
plt.figure(7)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------X_const_compensated------------------------------------------------------------------------------
# refreshed parameters
uplim = round(NV_2_strain(50+511,"quarter"),5)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([2,10,2])
offpeak_mat = np.zeros([4,10])
i = 0
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = (datafile_analyze("A0",SD_data,tnew) - datafile_analyze("A1",SD_data,tnew))
if (i ==1):
y = y/ratio
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [490,500,-uplim,uplim]
line = add_plot(8,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
if(i==1):
offpeak_mat[:,l] = find_offpeaks(tnew,y,fft_range)
offpeak_mat[:2,l] = offpeak_mat[:2,l]/flap_freq
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(8,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
plt.figure(8)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
p_mat = offpeak_mat
p_top = p_mat[:,p_mat[2,:].argsort()][:,5:]
peak_mean = np.mean(p_top,1)
peak_std = np.std(p_top,1)
print(peak_mean)
plt.figure(8)
plt.subplot(212)
plt.errorbar(peak_mean[0:2]+add_f[1],peak_mean[2:4],peak_std[2:4], fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------------------Y_per_qb_ratio find
# refreshed parameters
uplim = round(NV_2_strain(500+511,"quarter"),4)
freq_axis = [0,2.5,0,uplim]
peak_mat = np.zeros([2,10,2])
i = 0
offpeak_mat = np.zeros([4,10])
k_range = [4,11]
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = datafile_analyze("A0",SD_data,tnew)
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
line = add_plot(11,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(11,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
ratio = np.mean(peak_mat[1,[peak_mat[1,:,0].argsort()[5:]],0])/np.mean(peak_mat[0,[peak_mat[0,:,0].argsort()[5:]],0])
plt.figure(11)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
#%% ----------------------Y_per_compensated------------------------------------------------------------------------------
# refreshed parameters
uplim = round(NV_2_strain(50+511,"quarter"),5)
freq_axis = [0,2.5,0,uplim]
offpeak_mat = np.zeros([4,10])
i = 0
for k in k_range:
file_name = file_dict[k]
SD_data = pd.read_csv(data_dir + file_name + ".CSV")
y = (datafile_analyze("A0",SD_data,tnew) - datafile_analyze("A1",SD_data,tnew))
if (i ==1):
y = y/ratio
y = NV_2_strain(y+511,"quarter")
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
t_nondim = tnew*flap_freq
f_nondim = freq/flap_freq
y_axis = [t_nondim[fft_range[0]],t_nondim[fft_range[1]],-uplim,uplim]
y_axis = [700,710,-uplim,uplim]
line = add_plot(12,211,t_nondim,y,titleplot,y_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=3,tickstyle='sci',figsize = [9,9])
plt.setp(line,color = lincols[i])
find_double_offpeaks(tnew,y,fft_range)
for l in np.arange(0,10,1):
fft_range = [17999+5000*l,22999+5000*l]
freq,FFT = do_fft(tnew,y,fft_range)
flap_freq = find_f(tnew,y,fft_range)
f_nondim = freq/flap_freq + add_f[i]
if(i==1):
offpeak_mat[:,l] = find_double_offpeaks(tnew,y,fft_range)
offpeak_mat[:2,l] = offpeak_mat[:2,l]/flap_freq
peak_mat[i,l,:] = find_peaks(tnew,y,fft_range)[0]
line = add_plot(12,212,f_nondim,abs(FFT),titleplot,freq_axis,xlab = '$\\frac{f}{f_{flap}}$ [-]',ylab = '$\Delta \epsilon$',n_ytics=2,tickstyle='sci',figsize = [10,10])
plt.setp(line,color = lincols[i])
i = i +1
plt.figure(12)
plt.subplot(212)
for i in range(2):
p_mat = peak_mat[i,:,:]
p_top = p_mat[p_mat[:,0].argsort()][5:]
peak_mean = np.mean(p_top,0)
peak_std = np.std(p_top,0)
plt.errorbar([1+add_f[i],2+add_f[i]],peak_mean,peak_std, fmt='o',linewidth=2,color='k', capsize=10,capthick=2, zorder=3)
| mit |
Salman-H/mars-search-robot | code/drive_rover.py | 1 | 8957 | """
Main module for Mars Search Robot.
Gets rover telemetry data and supervises core tasks of
autonomous navigation and mapping.
"""
__author__ = 'Salman Hashmi, Ryan Keenan, Curt Welch'
__license__ = 'BSD License'
# Standard library imports
import os
import time
import json
import base64
import shutil
import pickle
import argparse
from datetime import datetime
from io import BytesIO, StringIO
# Related third party imports
import cv2
import socketio
import eventlet
import eventlet.wsgi
import numpy as np
import matplotlib.image as mpimg
from PIL import Image
from flask import Flask
# Local application/library specific imports
from perception import perception_step
import decision_new
from supporting_functions import update_rover, create_output_images
# Initialize socketio server and Flask application
# (learn more at: https://python-socketio.readthedocs.io/en/latest/)
sio = socketio.Server()
app = Flask(__name__)
# Read in ground truth map and create 3-channel green version for overplotting
# NOTE: images are read in by default with the origin (0, 0) in the upper left
# and y-axis increasing downward.
ground_truth = mpimg.imread('../calibration_images/map_bw.png')
# This next line creates arrays of zeros in the red and blue channels
# and puts the map into the green channel. This is why the underlying
# map output looks green in the display image
ground_truth_3d = np.dstack(
(ground_truth*0, ground_truth*255, ground_truth*0)
).astype(np.float)
class RoverTelemetry():
"""
Create a class to be a container for rover state telemetry values.
This allows for tracking telemetry values and results from
    perception analysis.
"""
def __init__(self):
"""
Initialize a RoverTelemetry instance to retain parameters.
NOTE: distances in meters and angles in degrees
"""
self.start_time = None # To record the start time of navigation
self.total_time = None # To record total duration of navigation
self.img = None # Current camera image
self.pos = None # Current position (x, y)
self.yaw = None # Current yaw angle
self.pitch = None # Current pitch angle
self.roll = None # Current roll angle
self.vel = None # Current velocity (m/s)
self.steer = 0 # Current steering angle
self.throttle = 0 # Current throttle value
self.brake = 0 # Current brake value
self.nav_dists = None # Distances to navigable terrain pixels
self.nav_angles = None # Angles of navigable terrain pixels
self.nav_angles_left = None # Nav terrain angles left of rover heading
self.obs_dists = None # Distances to obstacle terrain pixels
self.obs_angles = None # Angles of obstacle terrain pixels
self.rock_dists = None # Distances to rock terrain pixels
self.rock_angles = None # Angles of rock terrain pixels
self.samples_pos = None # To store the actual sample positions
self.samples_to_find = 0 # To store the initial count of samples
self.samples_collected = 0 # To count the number of samples collected
self.near_sample = 0 # To be set to TLM value data["near_sample"]
self.picking_up = 0 # To be set to TLM value data["picking_up"]
self.send_pickup = False # Set to True to trigger rock pickup
self.home_distance = None # Current distance to starting location
self.home_heading = None # Current heading to starting location
self.going_home = False # Default rover configuration
        self.timer_on = False        # Timer to track how long the rover is stuck
self.stuck_heading = 0.0 # Heading at the time of getting stuck
# Rover vision image to be updated with displays of
# intermediate analysis steps on screen in autonomous mode
self.vision_image = np.zeros((160, 320, 3), dtype=np.float)
# Worldmap image to be updated with the positions of
# ROIs navigable terrain, obstacles and rock samples
self.worldmap = np.zeros((200, 200, 3), dtype=np.float)
self.ground_truth = ground_truth_3d # Ground truth worldmap
# To update % of ground truth map successfully found
self.perc_mapped = 0
# Initialize our rover
Rover = RoverTelemetry()
# Initialize decision supervisor
Decider = decision_new.DecisionSupervisor()
# Variables to track frames per second (FPS)
# Initialize frame counter
frame_counter = 0
# Initialize second counter
second_counter = time.time()
fps = None
# Define telemetry function for what to do with incoming data
@sio.on('telemetry')
def telemetry(sid, data):
"""
Handle incoming telemetry data.
Run every time the simulator sends a new batch of data
    (nominally 25 times per second).
"""
global frame_counter, second_counter, fps
frame_counter += 1
# Do a rough calculation of frames per second (FPS)
if (time.time() - second_counter) > 1:
fps = frame_counter
frame_counter = 0
second_counter = time.time()
print("Current FPS: {}".format(fps))
if data:
global Rover
# Initialize / update Rover with current telemetry
Rover, image = update_rover(Rover, data)
if np.isfinite(Rover.vel):
# Execute perception and decision steps to update Rover's telemetry
Rover = perception_step(Rover)
Rover = Decider.execute(Rover)
# Create output images to send to server
out_image_strings = create_output_images(Rover, Decider)
out_image_string1, out_image_string2 = out_image_strings
# The action step! Send commands to the rover!
            # Don't send both of these; each triggers the simulator to send
            # back new telemetry, so we must send exactly one response to
            # the current telemetry data.
            # If in a state where we want to pick up a rock, send the
            # pickup command.
if Rover.send_pickup and not Rover.picking_up:
send_pickup()
Rover.send_pickup = False # Reset Rover flags
else:
# Send commands to the rover!
commands = (Rover.throttle, Rover.brake, Rover.steer)
send_control(commands, out_image_string1, out_image_string2)
# In case of invalid telemetry, send null commands
else:
# Send zeros for throttle, brake and steer and empty images
send_control((0, 0, 0), '', '')
# To save camera images from autonomous driving, specify a path
# Example: $ python drive_rover.py image_folder_path
# Conditional to save image frame if folder was specified
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
"""Invoke the connect event handler."""
print("connect ", sid)
send_control((0, 0, 0), '', '')
sample_data = {}
sio.emit(
"get_samples",
sample_data,
skip_sid=True)
def send_control(commands, image_string1, image_string2):
"""Send control commands to the rover."""
data = {
'throttle': commands[0].__str__(),
'brake': commands[1].__str__(),
'steering_angle': commands[2].__str__(),
'inset_image1': image_string1,
'inset_image2': image_string2,
}
# Send commands via socketIO server
sio.emit(
"data",
data,
skip_sid=True)
eventlet.sleep(0)
def send_pickup():
"""Send command to pickup rock sample."""
print("Picking up")
pickup = {}
sio.emit(
"pickup",
pickup,
skip_sid=True)
eventlet.sleep(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder.' +
' This is where the images from the run will be saved.'
)
args = parser.parse_args()
#os.system('rm -rf IMG_stream/*')
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("Recording this run ...")
else:
print("NOT recording this run ...")
# wrap Flask application with socketio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| bsd-2-clause |
Vimos/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
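# Invert the dissimilarities so that close pairs get large values, which map
# to darker edge colors below; the infinities produced by the zero diagonal
# are zeroed out immediately after.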
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
amitgroup/amitgroup | amitgroup/util/displacement_field_wavelet.py | 1 | 10838 | from __future__ import absolute_import
from __future__ import division
import amitgroup as ag
import numpy as np
from copy import deepcopy
from .displacement_field import DisplacementField
from .interp2d import interp2d
from amitgroup.util import wavelet
class DisplacementFieldWavelet(DisplacementField):
"""
Displacement field using wavelets.
This class requires the package `PyWavelets <http://www.pybytes.com/pywavelets/>`_.
Refer to :class:`DisplacementField` for interface documentation.
Parameters
----------
shape : tuple
Size of the displacement field.
wavelet : string
Specify wavelet type, ``'db1'`` (D2) to ``'db20'`` (D40).
penalty : float
        Coefficient controlling the strength of the prior. Higher values mean
        less deformation. Only used when the derivative is needed.
    rho : float
        A higher value penalizes finer (higher-level) coefficients more
        strongly, yielding smoother deformations. Only used when the
        derivative is needed.
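
    Examples
    --------
    A minimal construction sketch (the 32x32 shape here is an arbitrary,
    illustrative choice; ``db2`` is simply the default wavelet):

    >>> import amitgroup as ag
    >>> imdef = ag.util.DisplacementFieldWavelet((32, 32), wavelet='db2')
    >>> imdef.u.shape
    (2, 32, 32)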
"""
def __init__(self, shape, wavelet='db2', rho=2.0, penalty=1.0, means=None, variances=None, level_capacity=None):
#super(DisplacementFieldWavelet, self).__init__(shape)
assert means is None or means.ndim == 3
assert variances is None or variances.ndim == 3
self.wavelet = wavelet
self.mode = 'per'
self.shape = shape
self.prepare_shape()
self.rho = rho
self.penalty = penalty
#biggest = self.scriptNs[-1]
self.u = None
self.full_size_means = means
self.full_size_variances = variances
self.reset(level_capacity)
def reset(self, level_capacity):
if level_capacity is None:
self.level_capacity = self.levels
else:
self.level_capacity = level_capacity
N = 1 << self.level_capacity
self.ushape = (2, N, N)
        # We divide the penalty, since the raw penalty is the ratio of the
        # variance between the coefficients and the log-likelihood. It is
        # more natural to express the variance in terms of how much the
        # coefficients can deform the image in space, which implies
        # adjusting the s.d. by 2**self.levels. We square this factor since
        # we are dealing with the variance.
        # Notice: the penalty only applies if means and variances are not
        # set manually.
if self.penalty:
# Removed for now, since the penalty is pretty arbitrary anyway
            self.penalty_adjusted = self.penalty  # / 4**self.levels
if self.full_size_means is not None:
self.mu = self.full_size_means[:,:N,:N]
else:
self.mu = np.zeros(self.ushape)
if self.full_size_variances is not None:
self.lmbks = 1/self.full_size_variances[:,:N,:N]
else:
self._init_default_lmbks()
self._init_u()
@classmethod
def shape_for_size(cls, size, level_capacity=np.inf):
# TODO: Is this function used? It won't play nice if level_capacity has its default value.
N = 1 << level_capacity
return (2, N, N)
def _init_u(self):
new_u = np.copy(self.mu)
if self.u is not None:
# Resizes the coefficients, and fills with self.mu
A, B = min(new_u.shape[1], self.u.shape[1]), min(new_u.shape[2], self.u.shape[2])
new_u[:,:A,:B] = self.u[:,:A,:B]
self.u = new_u
@classmethod
def make_lambdas(cls, shape, levels=None, eta=1.0, rho=1.0):
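        """Build a per-coefficient penalty map (sketch of the loop's intent).

        Every coefficient inside the top-left ``S x S`` block of level
        ``level`` receives ``eta * 2.0**(rho * level)``; coarser levels are
        written last, so the coarsest coefficients keep the smallest penalty.
        """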
if levels is None:
levels = int(np.log2(max(shape)))
N = 1 << levels
lambdas = np.zeros((N, N))
for level in range(levels, -1, -1):
S = 1 << level
lambdas[:S,:S] = eta * 2.0**(rho * level)
return lambdas
def _init_default_lmbks(self):
self.lmbks = np.zeros(self.ushape)
for level in range(self.levels, -1, -1):
N = 2**level
self.lmbks[:,:N,:N] = self.penalty_adjusted * 2.0**(self.rho * level)
#print self.lmbks
def set_flat_u(self, flat_u, level):
"""
Sets `u` from a flattened array of a subset of `u`.
The size of the subset is determined by level. The rest of `u` is filled with zeros.
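
        For example (illustrative numbers), ``level=2`` gives ``N = 4``, so
        ``flat_u[:32]`` fills ``u[:, :4, :4]`` and the rest stays zero.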
"""
assert level <= self.level_capacity, "Please increase coefficient capacity for this level"
# First reset
# TODO: This might not be needed either
self.u.fill(0.0)
#shape = self.coef_shape(level)
N = 1 << level
# TODO: Should not need 2*N*N
        assert flat_u.size >= 2 * N * N, (self.u.shape, flat_u.shape, N)
self.u[:,:N,:N] = flat_u[:2*N*N].reshape((2, N, N))
def prepare_shape(self):
side = max(self.shape)
self.levels = int(np.log2(side))
self.levelshape = tuple(map(int, map(np.log2, self.shape)))
#self.scriptNs = map(len, pywt.wavedec(np.zeros(side), self.wavelet, level=self.levels, mode=self.mode))
def deform_x(self, x0, x1, last_level=np.inf):
last_level = min(last_level, self.level_capacity)
Ux0, Ux1 = self.invtransform(x0, x1, last_level)
return x0+Ux0, x1+Ux1
def deform_map(self, x, y, last_level=np.inf):
last_level = min(last_level, self.level_capacity)
return self.invtransform(x, y, last_level)
def transform(self, f, level):
"""
Forward transform of the wavelet.
"""
new = np.empty(self.ushape)
S = 1 << level
# TODO: Slicing should not be necessary
new[0,:S,:S] = ag.util.wavelet.wavedec2(f[0], self.wavelet, level, shape=self.shape)
new[1,:S,:S] = ag.util.wavelet.wavedec2(f[1], self.wavelet, level, shape=self.shape)
return new
# TODO: last_level not used
def invtransform(self, x, y, last_level=np.inf):
"""See :func:`DisplacementField.deform_map`"""
Ux = ag.util.wavelet.waverec2(self.u[0], self.wavelet, shape=self.shape)
Uy = ag.util.wavelet.waverec2(self.u[1], self.wavelet, shape=self.shape)
return Ux, Uy
def deform(self, F, levels=np.inf):
"""See :func:`DisplacementField.deform`"""
im = np.zeros(F.shape)
x0, x1 = self.meshgrid()
z0, z1 = self.deform_x(x0, x1, levels)
im = interp2d(z0, z1, F)
return im
def abridged_u(self, levels=None):
#return self.u[:,:self.flat_limit(last_level)]
S = 1 << levels
return self.u[:,:S,:S].copy()
def coef_shape(self, last_level=None):
return (self.ushape[0], self.flat_limit(last_level))
def logprior(self, last_level=None):
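        # Gaussian log-prior up to an additive constant:
        # -1/2 * sum(lmbk * (u - mu)**2), restricted to the first
        # N = 2**last_level rows/columns (all of them when last_level is None).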
N = None if last_level is None else 1 << last_level
return -(self.lmbks * (self.u - self.mu)**2)[:,:N,:N].sum() / 2
def logprior_derivative(self, last_level=None):
N = None if last_level is None else 1 << last_level
ret = (-self.lmbks * (self.u - self.mu))[:,:N,:N]
return ret
def sum_of_coefficients(self, last_level=None):
# Return only lmbks[0], because otherwise we'll double-count every
# value (since they are the same)
return self.lmbks[0,:self.flat_limit(last_level)].sum()
def number_of_coefficients(self, levels=None):
return self.ushape[1]
def copy(self):
return deepcopy(self)
def flat_limit(self, last_level=None):
# TODO: Come up with better name, and maybe place
return None if last_level is None else _flat_start(last_level+1, 0, self.levelshape)
def randomize(self, sigma=0.01, rho=2.5, start_level=1, levels=3):
"""
Randomly sets the coefficients up to a certain level by sampling a Gaussian.
Parameters
----------
sigma : float
            Standard deviation of the Gaussian. The `sigma` is expressed on a
            normalized image scale, not on the scale of coefficient values
            (nor pixels). This means that with `sigma` set to 1, the standard
            deviation is as large as the image itself, which is a lot. A more
            appropriate value is thus 0.01.
        rho : float
            A value higher than 1 will cause more damping of higher
            coefficients, which results in a smoother deformation.
        levels : int
Number of levels that should be randomized. The levels above will be set to zero. For a funny-mirror-type deformation, this should be limited to about 3.
Examples
--------
>>> import amitgroup as ag
>>> import matplotlib.pylab as plt
Generate 9 randomly altered faces.
>>> face = ag.io.load_example('faces')[0]
>>> imdef = ag.util.DisplacementFieldWavelet(face.shape, 'db8')
>>> ag.plot.images([imdef.randomize(0.1).deform(face) for i in range(9)])
>>> plt.show()
"""
# Reset all values first
self.u.fill(0.0)
end_level = min(self.levels+1, start_level+levels)
for q in range(2):
for level in range(end_level, start_level-1, -1):
N = 1 << level
                # First of all, a coefficient of 1 will shift the image by
                # 1/2**self.levels, so we first have to adjust for that.
                # Secondly, higher coefficients should be scaled by roughly
                # 2**-s to account for the different amplitudes of a wavelet
                # basis (energy-conservation reasons).
                # Finally, we may want to damp higher coefficients even
                # further to create a smoother image. This is done by rho.
adjust = 2.0**(self.levels - rho * max(level-1, 0))
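                # Worked example (illustrative numbers): with self.levels == 5
                # and rho == 2.5, level == 3 gives adjust = 2**(5 - 5) = 1.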
self.u[:,:N,:N] = np.random.normal(0.0, sigma, (2, N, N)) * adjust
return self
def ilevels(self):
for level in range(self.levels+1):
alphas = 1 if level == 0 else 3
yield level, (alphas,)+_levels2shape(self.levelshape, level)
def print_lmbks(self, last_level=np.inf):
for level, (alphas, N, M) in self.ilevels():
if level == last_level:
break
def print_u(self, last_level=np.inf):
for level, (alphas, N, M) in self.ilevels():
if level == last_level:
break
# TODO: The name 'u' for the coefficients is congruent with the book,
# but a bit confusing for other people. Change.
def ulevel(self, level):
alphas = 1 if level == 0 else 3
size = _levels2shape(self.levelshape, level)
#TODO: return self.u[:,level,:alphas,:size[0],:size[1]]
def lmbk_level(self, level):
alphas = 1 if level == 0 else 3
size = _levels2shape(self.levelshape, level)
return self.lmbks[:,level,:alphas,:size[0],:size[1]]
| bsd-3-clause |
olafhauk/mne-python | mne/channels/tests/test_montage.py | 3 | 54368 | # Author: Teon Brooks <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
from itertools import chain
import os
import os.path as op
import pytest
import numpy as np
from functools import partial
from string import ascii_lowercase
from numpy.testing import (assert_array_equal,
assert_allclose, assert_equal)
import matplotlib.pyplot as plt
from mne import __file__ as _mne_file, create_info, read_evokeds, pick_types
from mne.fixes import nullcontext
from mne.utils._testing import assert_object_equal
from mne.channels import (get_builtin_montages, DigMontage, read_dig_dat,
read_dig_egi, read_dig_captrak, read_dig_fif,
make_standard_montage, read_custom_montage,
compute_dev_head_t, make_dig_montage,
read_dig_polhemus_isotrak, compute_native_head_t,
read_polhemus_fastscan,
read_dig_hpts)
from mne.channels.montage import transform_to_head, _check_get_coord_frame
from mne.utils import run_tests_if_main, assert_dig_allclose
from mne.bem import _fit_sphere
from mne.io.constants import FIFF
from mne.io._digitization import (_format_dig_points,
_get_fid_coords, _get_dig_eeg,
_count_points_by_type)
from mne.transforms import _ensure_trans
from mne.viz._3d import _fiducial_coords
from mne.io.kit import read_mrk
from mne.io import (read_raw_brainvision, read_raw_egi, read_raw_fif,
read_fiducials, __file__ as _MNE_IO_FILE)
from mne.io import RawArray
from mne.datasets import testing
from mne.io.brainvision import __file__ as _BRAINVISON_FILE
data_path = testing.data_path(download=False)
fif_dig_montage_fname = op.join(data_path, 'montage', 'eeganes07.fif')
egi_dig_montage_fname = op.join(data_path, 'montage', 'coordinates.xml')
egi_raw_fname = op.join(data_path, 'montage', 'egi_dig_test.raw')
egi_fif_fname = op.join(data_path, 'montage', 'egi_dig_raw.fif')
bvct_dig_montage_fname = op.join(data_path, 'montage', 'captrak_coords.bvct')
bv_raw_fname = op.join(data_path, 'montage', 'bv_dig_test.vhdr')
bv_fif_fname = op.join(data_path, 'montage', 'bv_dig_raw.fif')
locs_montage_fname = op.join(data_path, 'EEGLAB', 'test_chans.locs')
evoked_fname = op.join(data_path, 'montage', 'level2_raw-ave.fif')
eeglab_fname = op.join(data_path, 'EEGLAB', 'test_raw.set')
bdf_fname1 = op.join(data_path, 'BDF', 'test_generator_2.bdf')
bdf_fname2 = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')
egi_fname1 = op.join(data_path, 'EGI', 'test_egi.mff')
cnt_fname = op.join(data_path, 'CNT', 'scan41_short.cnt')
io_dir = op.dirname(_MNE_IO_FILE)
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
elp = op.join(kit_dir, 'test_elp.txt')
hsp = op.join(kit_dir, 'test_hsp.txt')
hpi = op.join(kit_dir, 'test_mrk.sqd')
bv_fname = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
edf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test.edf')
bdf_path = op.join(io_dir, 'edf', 'tests', 'data', 'test_bdf_eeglab.mat')
egi_fname2 = op.join(io_dir, 'egi', 'tests', 'data', 'test_egi.raw')
vhdr_path = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')
ctf_fif_fname = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
nicolet_fname = op.join(io_dir, 'nicolet', 'tests', 'data',
'test_nicolet_raw.data')
def _make_toy_raw(n_channels):
return RawArray(
data=np.empty([n_channels, 1]),
info=create_info(
ch_names=list(ascii_lowercase[:n_channels]),
sfreq=1, ch_types='eeg'
)
)
def _make_toy_dig_montage(n_channels, **kwargs):
return make_dig_montage(
ch_pos=dict(zip(
list(ascii_lowercase[:n_channels]),
np.arange(n_channels * 3).reshape(n_channels, 3),
)),
**kwargs
)
def _get_dig_montage_pos(montage):
return np.array([d['r'] for d in _get_dig_eeg(montage.dig)])
def test_dig_montage_trans(tmpdir):
"""Test getting a trans from montage."""
nasion, lpa, rpa, *ch_pos = np.random.RandomState(0).randn(10, 3)
ch_pos = {f'EEG{ii:3d}': pos for ii, pos in enumerate(ch_pos, 1)}
montage = make_dig_montage(ch_pos, nasion=nasion, lpa=lpa, rpa=rpa,
coord_frame='mri')
trans = compute_native_head_t(montage)
_ensure_trans(trans)
# ensure that we can save and load it, too
fname = tmpdir.join('temp-mon.fif')
_check_roundtrip(montage, fname, 'mri')
def test_fiducials():
"""Test handling of fiducials."""
# Eventually the code used here should be unified with montage.py, but for
# now it uses code in odd places
for fname in (fif_fname, ctf_fif_fname):
fids, coord_frame = read_fiducials(fname)
points = _fiducial_coords(fids, coord_frame)
assert points.shape == (3, 3)
# Fids
assert_allclose(points[:, 2], 0., atol=1e-6)
assert_allclose(points[::2, 1], 0., atol=1e-6)
assert points[2, 0] > 0 # RPA
assert points[0, 0] < 0 # LPA
# Nasion
assert_allclose(points[1, 0], 0., atol=1e-6)
assert points[1, 1] > 0
def test_documented():
"""Test that standard montages are documented."""
docs = make_standard_montage.__doc__
lines = [line[4:] for line in docs.splitlines()]
start = stop = None
for li, line in enumerate(lines):
if line.startswith('====') and li < len(lines) - 2 and \
lines[li + 1].startswith('Kind') and\
lines[li + 2].startswith('===='):
start = li + 3
elif start is not None and li > start and line.startswith('===='):
stop = li
break
assert (start is not None)
assert (stop is not None)
kinds = [line.split(' ')[0] for line in lines[start:stop]]
kinds = [kind for kind in kinds if kind != '']
montages = os.listdir(op.join(op.dirname(_mne_file), 'channels', 'data',
'montages'))
montages = sorted(op.splitext(m)[0] for m in montages)
assert_equal(len(set(montages)), len(montages))
assert_equal(len(set(kinds)), len(kinds), err_msg=str(sorted(kinds)))
assert_equal(set(montages), set(kinds))
@pytest.mark.parametrize('reader, file_content, expected_dig, ext, warning', [
pytest.param(
partial(read_custom_montage, head_size=None),
('FidNz 0 9.071585155 -2.359754454\n'
'FidT9 -6.711765 0.040402876 -3.251600355\n'
'very_very_very_long_name -5.831241498 -4.494821698 4.955347697\n'
'Cz 0 0 1\n'
'Cz 0 0 8.899186843'),
make_dig_montage(
ch_pos={
'very_very_very_long_name': [-5.8312416, -4.4948215, 4.9553475], # noqa
'Cz': [0., 0., 8.899187],
},
nasion=[0., 9.071585, -2.3597546],
lpa=[-6.711765, 0.04040287, -3.2516003],
rpa=None,
),
'sfp',
(RuntimeWarning, r'Duplicate.*last will be used for Cz \(2\)'),
id='sfp_duplicate'),
pytest.param(
partial(read_custom_montage, head_size=None),
('FidNz 0 9.071585155 -2.359754454\n'
'FidT9 -6.711765 0.040402876 -3.251600355\n'
'headshape 1 2 3\n'
'headshape 4 5 6\n'
'Cz 0 0 8.899186843'),
make_dig_montage(
hsp=[
[1, 2, 3],
[4, 5, 6],
],
ch_pos={
'Cz': [0., 0., 8.899187],
},
nasion=[0., 9.071585, -2.3597546],
lpa=[-6.711765, 0.04040287, -3.2516003],
rpa=None,
),
'sfp',
None,
id='sfp_headshape'),
pytest.param(
partial(read_custom_montage, head_size=1),
('1 0 0.50669 FPz\n'
'2 23 0.71 EOG1\n'
'3 -39.947 0.34459 F3\n'
'4 0 0.25338 Fz\n'),
make_dig_montage(
ch_pos={
'EOG1': [0.30873816, 0.72734152, -0.61290705],
'F3': [-0.56705965, 0.67706631, 0.46906776],
'FPz': [0., 0.99977915, -0.02101571],
'Fz': [0., 0.71457525, 0.69955859],
},
nasion=None, lpa=None, rpa=None, coord_frame='head',
),
'loc',
None,
id='EEGLAB'),
pytest.param(
partial(read_custom_montage, head_size=None, coord_frame='mri'),
('// MatLab Sphere coordinates [degrees] Cartesian coordinates\n' # noqa: E501
'// Label Theta Phi Radius X Y Z off sphere surface\n' # noqa: E501
'E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011\n' # noqa: E501
'E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000\n' # noqa: E501
'E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000\n' # noqa: E501
'E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022'), # noqa: E501
make_dig_montage(
ch_pos={
'E1': [0.7677, 0.5934, -0.2419],
'E3': [0.6084, 0.7704, 0.1908],
'E31': [0., 0.9816, -0.1908],
'E61': [-0.8857, 0.3579, -0.2957],
},
nasion=None, lpa=None, rpa=None, coord_frame='mri',
),
'csd',
None,
id='matlab'),
pytest.param(
partial(read_custom_montage, head_size=None),
('# ASA electrode file\nReferenceLabel avg\nUnitPosition mm\n'
'NumberPositions= 68\n'
'Positions\n'
'-86.0761 -19.9897 -47.9860\n'
'85.7939 -20.0093 -48.0310\n'
'0.0083 86.8110 -39.9830\n'
'-86.0761 -24.9897 -67.9860\n'
'Labels\nLPA\nRPA\nNz\nDummy\n'),
make_dig_montage(
ch_pos={
'Dummy': [-0.0860761, -0.0249897, -0.067986],
},
nasion=[8.3000e-06, 8.6811e-02, -3.9983e-02],
lpa=[-0.0860761, -0.0199897, -0.047986],
rpa=[0.0857939, -0.0200093, -0.048031],
),
'elc',
None,
id='ASA electrode'),
pytest.param(
partial(read_custom_montage, head_size=1),
('Site Theta Phi\n'
'Fp1 -92 -72\n'
'Fp2 92 72\n'
'very_very_very_long_name -92 72\n'
'O2 92 -90\n'),
make_dig_montage(
ch_pos={
'Fp1': [-0.30882875, 0.95047716, -0.0348995],
'Fp2': [0.30882875, 0.95047716, -0.0348995],
'very_very_very_long_name': [-0.30882875, -0.95047716, -0.0348995], # noqa
'O2': [6.11950389e-17, -9.99390827e-01, -3.48994967e-02]
},
nasion=None, lpa=None, rpa=None,
),
'txt',
None,
id='generic theta-phi (txt)'),
pytest.param(
partial(read_custom_montage, head_size=None),
        ('346\n' # XXX: this should actually raise an error 346 != 4
'FID\t LPA\t -120.03\t 0\t 85\n'
'FID\t RPA\t 120.03\t 0\t 85\n'
'FID\t Nz\t 114.03\t 90\t 85\n'
'EEG\t F3\t -62.027\t -50.053\t 85\n'
'EEG\t Fz\t 45.608\t 90\t 85\n'
'EEG\t F4\t 62.01\t 50.103\t 85\n'
'EEG\t FCz\t 68.01\t 58.103\t 85\n'),
make_dig_montage(
ch_pos={
'F3': [-0.48200427, 0.57551063, 0.39869712],
'Fz': [3.71915931e-17, 6.07384809e-01, 5.94629038e-01],
'F4': [0.48142596, 0.57584026, 0.39891983],
'FCz': [0.41645989, 0.66914889, 0.31827805],
},
nasion=[4.75366562e-17, 7.76332511e-01, -3.46132681e-01],
lpa=[-7.35898963e-01, 9.01216309e-17, -4.25385374e-01],
rpa=[0.73589896, 0., -0.42538537],
),
'elp',
None,
id='BESA spherical model'),
pytest.param(
partial(read_dig_hpts, unit='m'),
('eeg Fp1 -95.0 -3. -3.\n'
'eeg AF7 -1 -1 -3\n'
'eeg A3 -2 -2 2\n'
'eeg A 0 0 0'),
make_dig_montage(
ch_pos={
'A': [0., 0., 0.], 'A3': [-2., -2., 2.],
'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.],
},
nasion=None, lpa=None, rpa=None,
),
'hpts',
None,
id='legacy mne-c'),
pytest.param(
partial(read_custom_montage, head_size=None),
('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
'<!-- Generated by EasyCap Configurator 19.05.2014 -->\n'
'<Electrodes defaults="false">\n'
' <Electrode>\n'
' <Name>Fp1</Name>\n'
' <Theta>-90</Theta>\n'
' <Phi>-72</Phi>\n'
' <Radius>1</Radius>\n'
' <Number>1</Number>\n'
' </Electrode>\n'
' <Electrode>\n'
' <Name>Fz</Name>\n'
' <Theta>45</Theta>\n'
' <Phi>90</Phi>\n'
' <Radius>1</Radius>\n'
' <Number>2</Number>\n'
' </Electrode>\n'
' <Electrode>\n'
' <Name>F3</Name>\n'
' <Theta>-60</Theta>\n'
' <Phi>-51</Phi>\n'
' <Radius>1</Radius>\n'
' <Number>3</Number>\n'
' </Electrode>\n'
' <Electrode>\n'
' <Name>F7</Name>\n'
' <Theta>-90</Theta>\n'
' <Phi>-36</Phi>\n'
' <Radius>1</Radius>\n'
' <Number>4</Number>\n'
' </Electrode>\n'
'</Electrodes>'),
make_dig_montage(
ch_pos={
'Fp1': [-3.09016994e-01, 9.51056516e-01, 6.12323400e-17],
'Fz': [4.32978028e-17, 7.07106781e-01, 7.07106781e-01],
'F3': [-0.54500745, 0.67302815, 0.5],
'F7': [-8.09016994e-01, 5.87785252e-01, 6.12323400e-17],
},
nasion=None, lpa=None, rpa=None,
),
'bvef',
None,
id='brainvision'),
])
def test_montage_readers(
reader, file_content, expected_dig, ext, warning, tmpdir
):
"""Test that we have an equivalent of read_montage for all file formats."""
fname = op.join(str(tmpdir), 'test.{ext}'.format(ext=ext))
with open(fname, 'w') as fid:
fid.write(file_content)
if warning is None:
ctx = nullcontext()
else:
ctx = pytest.warns(warning[0], match=warning[1])
with ctx:
dig_montage = reader(fname)
assert isinstance(dig_montage, DigMontage)
actual_ch_pos = dig_montage._get_ch_pos()
expected_ch_pos = expected_dig._get_ch_pos()
for kk in actual_ch_pos:
assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5)
assert len(dig_montage.dig) == len(expected_dig.dig)
for d1, d2 in zip(dig_montage.dig, expected_dig.dig):
assert d1['coord_frame'] == d2['coord_frame']
for key in ('coord_frame', 'ident', 'kind'):
assert isinstance(d1[key], int)
assert isinstance(d2[key], int)
with pytest.warns(None) as w:
xform = compute_native_head_t(dig_montage)
assert xform['to'] == FIFF.FIFFV_COORD_HEAD
assert xform['from'] == FIFF.FIFFV_COORD_UNKNOWN
n = int(np.allclose(xform['trans'], np.eye(4)))
assert len(w) == n
@testing.requires_testing_data
def test_read_locs():
"""Test reading EEGLAB locs."""
data = read_custom_montage(locs_montage_fname)._get_ch_pos()
assert_allclose(
actual=np.stack(
[data[kk] for kk in ('FPz', 'EOG1', 'F3', 'Fz')] # 4 random chs
),
desired=[[0., 0.094979, -0.001996],
[0.02933, 0.069097, -0.058226],
[-0.053871, 0.064321, 0.044561],
[0., 0.067885, 0.066458]],
atol=1e-6
)
def test_read_dig_dat(tmpdir):
"""Test reading *.dat electrode locations."""
rows = [
['Nasion', 78, 0.00, 1.00, 0.00],
['Left', 76, -1.00, 0.00, 0.00],
['Right', 82, 1.00, -0.00, 0.00],
['O2', 69, -0.50, -0.90, 0.05],
['O2', 68, 0.00, 0.01, 0.02],
['Centroid', 67, 0.00, 0.00, 0.00],
]
# write mock test.dat file
temp_dir = str(tmpdir)
fname_temp = op.join(temp_dir, 'test.dat')
with open(fname_temp, 'w') as fid:
for row in rows:
name = row[0].rjust(10)
data = '\t'.join(map(str, row[1:]))
fid.write("%s\t%s\n" % (name, data))
# construct expected value
idents = {
78: FIFF.FIFFV_POINT_NASION,
76: FIFF.FIFFV_POINT_LPA,
82: FIFF.FIFFV_POINT_RPA,
68: 1,
69: 1,
}
kinds = {
78: FIFF.FIFFV_POINT_CARDINAL,
76: FIFF.FIFFV_POINT_CARDINAL,
82: FIFF.FIFFV_POINT_CARDINAL,
69: FIFF.FIFFV_POINT_EEG,
68: FIFF.FIFFV_POINT_EEG,
}
target = {row[0]: {'r': row[2:], 'ident': idents[row[1]],
'kind': kinds[row[1]], 'coord_frame': 0}
for row in rows[:-1]}
assert_allclose(target['O2']['r'], [0, 0.01, 0.02])
# read it
with pytest.warns(RuntimeWarning, match=r'Duplic.*for O2 \(2\)'):
dig = read_dig_dat(fname_temp)
assert set(dig.ch_names) == {'O2'}
keys = chain(['Left', 'Nasion', 'Right'], dig.ch_names)
target = [target[k] for k in keys]
assert dig.dig == target
def test_read_dig_montage_using_polhemus_fastscan():
"""Test FastScan."""
N_EEG_CH = 10
my_electrode_positions = read_polhemus_fastscan(
op.join(kit_dir, 'test_elp.txt')
)
montage = make_dig_montage(
# EEG_CH
ch_pos=dict(zip(ascii_lowercase[:N_EEG_CH],
np.random.RandomState(0).rand(N_EEG_CH, 3))),
# NO NAMED points
nasion=my_electrode_positions[0],
lpa=my_electrode_positions[1],
rpa=my_electrode_positions[2],
hpi=my_electrode_positions[3:],
hsp=read_polhemus_fastscan(op.join(kit_dir, 'test_hsp.txt')),
# Other defaults
coord_frame='unknown'
)
assert repr(montage) == (
'<DigMontage | '
'500 extras (headshape), 5 HPIs, 3 fiducials, 10 channels>'
) # XXX: is this wrong? extra is not in headspace, is it?
assert set([d['coord_frame'] for d in montage.dig]) == {
FIFF.FIFFV_COORD_UNKNOWN
} # XXX: so far we build everything in 'unknown'
EXPECTED_FID_IN_POLHEMUS = {
'nasion': [0.001393, 0.0131613, -0.0046967],
'lpa': [-0.0624997, -0.0737271, 0.07996],
'rpa': [-0.0748957, 0.0873785, 0.0811943],
}
fiducials, fid_coordframe = _get_fid_coords(montage.dig)
assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN
for kk, val in fiducials.items():
assert_allclose(val, EXPECTED_FID_IN_POLHEMUS[kk])
def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmpdir):
"""Test reading Polhemus FastSCAN errors."""
with open(op.join(kit_dir, 'test_elp.txt')) as fid:
content = fid.read().replace('FastSCAN', 'XxxxXXXX')
fname = str(tmpdir.join('faulty_FastSCAN.txt'))
with open(fname, 'w') as fid:
fid.write(content)
with pytest.raises(ValueError, match='not contain.*Polhemus FastSCAN'):
_ = read_polhemus_fastscan(fname)
EXPECTED_ERR_MSG = "allowed value is '.txt', but got '.bar' instead"
with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
_ = read_polhemus_fastscan(fname=tmpdir.join('foo.bar'))
def test_read_dig_polhemus_isotrak_hsp():
"""Test reading Polhemus IsoTrak HSP file."""
EXPECTED_FID_IN_POLHEMUS = {
'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),
'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),
'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),
}
montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.hsp'),
ch_names=None)
assert repr(montage) == (
'<DigMontage | '
'500 extras (headshape), 0 HPIs, 3 fiducials, 0 channels>'
)
fiducials, fid_coordframe = _get_fid_coords(montage.dig)
assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN
for kk, val in fiducials.items():
assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])
def test_read_dig_polhemus_isotrak_elp():
"""Test reading Polhemus IsoTrak ELP file."""
EXPECTED_FID_IN_POLHEMUS = {
'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),
'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),
'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),
}
montage = read_dig_polhemus_isotrak(fname=op.join(kit_dir, 'test.elp'),
ch_names=None)
assert repr(montage) == (
'<DigMontage | '
'0 extras (headshape), 5 HPIs, 3 fiducials, 0 channels>'
)
fiducials, fid_coordframe = _get_fid_coords(montage.dig)
assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN
for kk, val in fiducials.items():
assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])
@pytest.fixture(scope='module')
def isotrak_eeg(tmpdir_factory):
"""Mock isotrak file with EEG positions."""
_SEED = 42
N_ROWS, N_COLS = 5, 3
content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS)
fname = tmpdir_factory.mktemp('data').join('test.eeg')
with open(str(fname), 'w') as fid:
fid.write((
'3 200\n'
'//Shape file\n'
'//Minor revision number\n'
'2\n'
'//Subject Name\n'
'%N Name \n'
'////Shape code, number of digitized points\n'
))
fid.write('0 {rows:d}\n'.format(rows=N_ROWS))
fid.write((
'//Position of fiducials X+, Y+, Y- on the subject\n'
'%F 0.11056 -5.421e-19 0 \n'
'%F -0.00021075 0.080793 -7.5894e-19 \n'
'%F 0.00021075 -0.080793 -2.8731e-18 \n'
'//No of rows, no of columns; position of digitized points\n'
))
fid.write('{rows:d} {cols:d}\n'.format(rows=N_ROWS, cols=N_COLS))
for row in content:
fid.write('\t'.join('%0.18e' % cell for cell in row) + '\n')
return str(fname)
def test_read_dig_polhemus_isotrak_eeg(isotrak_eeg):
"""Test reading Polhemus IsoTrak EEG positions."""
N_CHANNELS = 5
_SEED = 42
EXPECTED_FID_IN_POLHEMUS = {
'nasion': np.array([1.1056e-01, -5.4210e-19, 0]),
'lpa': np.array([-2.1075e-04, 8.0793e-02, -7.5894e-19]),
'rpa': np.array([2.1075e-04, -8.0793e-02, -2.8731e-18]),
}
ch_names = ['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS)]
EXPECTED_CH_POS = dict(zip(
ch_names, np.random.RandomState(_SEED).randn(N_CHANNELS, 3)))
montage = read_dig_polhemus_isotrak(fname=isotrak_eeg, ch_names=ch_names)
assert repr(montage) == (
'<DigMontage | '
'0 extras (headshape), 0 HPIs, 3 fiducials, 5 channels>'
)
fiducials, fid_coordframe = _get_fid_coords(montage.dig)
assert fid_coordframe == FIFF.FIFFV_COORD_UNKNOWN
for kk, val in fiducials.items():
assert_array_equal(val, EXPECTED_FID_IN_POLHEMUS[kk])
for kk, dig_point in zip(montage.ch_names, _get_dig_eeg(montage.dig)):
assert_array_equal(dig_point['r'], EXPECTED_CH_POS[kk])
assert dig_point['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN
def test_read_dig_polhemus_isotrak_error_handling(isotrak_eeg, tmpdir):
"""Test errors in reading Polhemus IsoTrak files.
    1 - ch_names must match the number of points in the isotrak file.
    2 - an error is raised for unsupported file extensions.
"""
# Check ch_names
N_CHANNELS = 5
EXPECTED_ERR_MSG = "not match the number of points.*Expected.*5, given 47"
with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
_ = read_dig_polhemus_isotrak(
fname=isotrak_eeg,
ch_names=['eeg {:01d}'.format(ii) for ii in range(N_CHANNELS + 42)]
)
# Check fname extensions
fname = op.join(tmpdir, 'foo.bar')
with pytest.raises(
ValueError,
match="Allowed val.*'.hsp', '.elp', and '.eeg', but got '.bar' instead"
):
_ = read_dig_polhemus_isotrak(fname=fname, ch_names=None)
def test_combining_digmontage_objects():
"""Test combining different DigMontage objects."""
rng = np.random.RandomState(0)
fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))
# hsp positions are [1X, 1X, 1X]
hsp1 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 11.))
hsp2 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 12.))
hsp3 = make_dig_montage(**fiducials, hsp=np.full((2, 3), 13.))
# hpi positions are [2X, 2X, 2X]
hpi1 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 21.))
hpi2 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 22.))
hpi3 = make_dig_montage(**fiducials, hpi=np.full((2, 3), 23.))
# channels have positions at 40s, 50s, and 60s.
ch_pos1 = make_dig_montage(
**fiducials,
ch_pos={'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43]}
)
ch_pos2 = make_dig_montage(
**fiducials,
ch_pos={'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53]}
)
ch_pos3 = make_dig_montage(
**fiducials,
ch_pos={'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63]}
)
montage = (
DigMontage() + hsp1 + hsp2 + hsp3 + hpi1 + hpi2 + hpi3 + ch_pos1 +
ch_pos2 + ch_pos3
)
assert repr(montage) == (
'<DigMontage | '
'6 extras (headshape), 6 HPIs, 3 fiducials, 9 channels>'
)
EXPECTED_MONTAGE = make_dig_montage(
**fiducials,
hsp=np.concatenate([np.full((2, 3), 11.), np.full((2, 3), 12.),
np.full((2, 3), 13.)]),
hpi=np.concatenate([np.full((2, 3), 21.), np.full((2, 3), 22.),
np.full((2, 3), 23.)]),
ch_pos={
'h': [41, 41, 41], 'b': [42, 42, 42], 'g': [43, 43, 43],
'n': [51, 51, 51], 'y': [52, 52, 52], 'p': [53, 53, 53],
'v': [61, 61, 61], 'a': [62, 62, 62], 'l': [63, 63, 63],
}
)
# Do some checks to ensure they are the same DigMontage
assert len(montage.ch_names) == len(EXPECTED_MONTAGE.ch_names)
assert all([c in montage.ch_names for c in EXPECTED_MONTAGE.ch_names])
actual_occurrences = _count_points_by_type(montage.dig)
expected_occurrences = _count_points_by_type(EXPECTED_MONTAGE.dig)
assert actual_occurrences == expected_occurrences
def test_combining_digmontage_forbidden_behaviors():
"""Test combining different DigMontage objects with repeated names."""
rng = np.random.RandomState(0)
fiducials = dict(zip(('nasion', 'lpa', 'rpa'), rng.rand(3, 3)))
dig1 = make_dig_montage(
**fiducials,
ch_pos=dict(zip(list('abc'), rng.rand(3, 3))),
)
dig2 = make_dig_montage(
**fiducials,
ch_pos=dict(zip(list('bcd'), rng.rand(3, 3))),
)
dig2_wrong_fid = make_dig_montage(
nasion=rng.rand(3), lpa=rng.rand(3), rpa=rng.rand(3),
ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),
)
dig2_wrong_coordframe = make_dig_montage(
**fiducials,
ch_pos=dict(zip(list('ghi'), rng.rand(3, 3))),
coord_frame='meg'
)
EXPECTED_ERR_MSG = "Cannot.*duplicated channel.*found: \'b\', \'c\'."
with pytest.raises(RuntimeError, match=EXPECTED_ERR_MSG):
_ = dig1 + dig2
with pytest.raises(RuntimeError, match='fiducial locations do not match'):
_ = dig1 + dig2_wrong_fid
with pytest.raises(RuntimeError, match='not in the same coordinate '):
_ = dig1 + dig2_wrong_coordframe
def test_set_dig_montage():
"""Test setting DigMontage with toy understandable points."""
N_CHANNELS, N_HSP, N_HPI = 3, 2, 1
ch_names = list(ascii_lowercase[:N_CHANNELS])
ch_pos = dict(zip(
ch_names,
np.arange(N_CHANNELS * 3).reshape(N_CHANNELS, 3),
))
montage_ch_only = make_dig_montage(ch_pos=ch_pos, coord_frame='head')
assert repr(montage_ch_only) == (
'<DigMontage | 0 extras (headshape), 0 HPIs, 0 fiducials, 3 channels>'
)
info = create_info(ch_names, sfreq=1, ch_types='eeg')
info.set_montage(montage_ch_only)
assert len(info['dig']) == len(montage_ch_only.dig)
assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),
desired=[[0., 1., 2., 0., 0., 0.],
[3., 4., 5., 0., 0., 0.],
[6., 7., 8., 0., 0., 0.]])
montage_full = make_dig_montage(
        ch_pos=dict(**ch_pos, EEG000=np.full(3, 42)), # 4 = 3 eeg + 1 eeg_ref
nasion=[1, 1, 1], lpa=[2, 2, 2], rpa=[3, 3, 3],
hsp=np.full((N_HSP, 3), 4),
hpi=np.full((N_HPI, 3), 4),
coord_frame='head'
)
assert repr(montage_full) == (
'<DigMontage | 2 extras (headshape), 1 HPIs, 3 fiducials, 4 channels>'
)
info = create_info(ch_names, sfreq=1, ch_types='eeg')
info.set_montage(montage_full)
EXPECTED_LEN = sum({'hsp': 2, 'hpi': 1, 'fid': 3, 'eeg': 4}.values())
assert len(info['dig']) == EXPECTED_LEN
assert_allclose(actual=np.array([ch['loc'][:6] for ch in info['chs']]),
desired=[[0., 1., 2., 42., 42., 42.],
[3., 4., 5., 42., 42., 42.],
[6., 7., 8., 42., 42., 42.]])
@testing.requires_testing_data
def test_fif_dig_montage(tmpdir):
"""Test FIF dig montage support."""
dig_montage = read_dig_fif(fif_dig_montage_fname)
# test round-trip IO
temp_dir = str(tmpdir)
fname_temp = op.join(temp_dir, 'test.fif')
_check_roundtrip(dig_montage, fname_temp)
# Make a BrainVision file like the one the user would have had
raw_bv = read_raw_brainvision(bv_fname, preload=True)
raw_bv_2 = raw_bv.copy()
mapping = dict()
for ii, ch_name in enumerate(raw_bv.ch_names):
mapping[ch_name] = 'EEG%03d' % (ii + 1,)
raw_bv.rename_channels(mapping)
for ii, ch_name in enumerate(raw_bv_2.ch_names):
mapping[ch_name] = 'EEG%03d' % (ii + 33,)
raw_bv_2.rename_channels(mapping)
raw_bv.add_channels([raw_bv_2])
for ch in raw_bv.info['chs']:
ch['kind'] = FIFF.FIFFV_EEG_CH
# Set the montage
raw_bv.set_montage(dig_montage)
# Check the result
evoked = read_evokeds(evoked_fname)[0]
# check info[chs] matches
assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1)
for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]):
assert_equal(ch_py['ch_name'],
ch_c['ch_name'].replace('EEG ', 'EEG'))
# C actually says it's unknown, but it's not (?):
# assert_equal(ch_py['coord_frame'], ch_c['coord_frame'])
assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD)
c_loc = ch_c['loc'].copy()
c_loc[c_loc == 0] = np.nan
assert_allclose(ch_py['loc'], c_loc, atol=1e-7)
# check info[dig]
assert_dig_allclose(raw_bv.info, evoked.info)
# Roundtrip of non-FIF start
montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp),
hpi=read_mrk(hpi))
elp_points = read_polhemus_fastscan(elp)
ch_pos = {"EEG%03d" % (k + 1): pos for k, pos in enumerate(elp_points[8:])}
montage += make_dig_montage(nasion=elp_points[0],
lpa=elp_points[1],
rpa=elp_points[2],
ch_pos=ch_pos)
_check_roundtrip(montage, fname_temp, 'unknown')
montage = transform_to_head(montage)
_check_roundtrip(montage, fname_temp)
montage.dig[0]['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
with pytest.raises(RuntimeError, match='Only a single coordinate'):
montage.save(fname_temp)
@testing.requires_testing_data
def test_egi_dig_montage(tmpdir):
"""Test EGI MFF XML dig montage support."""
dig_montage = read_dig_egi(egi_dig_montage_fname)
fid, coord = _get_fid_coords(dig_montage.dig)
assert coord == FIFF.FIFFV_COORD_UNKNOWN
assert_allclose(
actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),
desired=[[ 0. , 10.564, -2.051], # noqa
[-8.592, 0.498, -4.128], # noqa
[ 8.592, 0.498, -4.128]], # noqa
)
# Test accuracy and embedding within raw object
raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d')
raw_egi.set_montage(dig_montage)
test_raw_egi = read_raw_fif(egi_fif_fname)
assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names))
for ch_raw, ch_test_raw in zip(raw_egi.info['chs'],
test_raw_egi.info['chs']):
assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name'])
assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD)
assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7)
assert_dig_allclose(raw_egi.info, test_raw_egi.info)
dig_montage_in_head = transform_to_head(dig_montage.copy())
fid, coord = _get_fid_coords(dig_montage_in_head.dig)
assert coord == FIFF.FIFFV_COORD_HEAD
assert_allclose(
actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),
desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]],
atol=1e-4,
)
# test round-trip IO
fname_temp = tmpdir.join('egi_test.fif')
_check_roundtrip(dig_montage, fname_temp, 'unknown')
_check_roundtrip(dig_montage_in_head, fname_temp)
def _pop_montage(dig_montage, ch_name):
# remove reference that was not used in old API
name_idx = dig_montage.ch_names.index(ch_name)
dig_idx = dig_montage._get_dig_names().index(ch_name)
del dig_montage.dig[dig_idx]
del dig_montage.ch_names[name_idx]
for k in range(dig_idx, len(dig_montage.dig)):
dig_montage.dig[k]['ident'] -= 1
@testing.requires_testing_data
def test_read_dig_captrak(tmpdir):
"""Test reading a captrak montage file."""
EXPECTED_CH_NAMES_OLD = [
'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1',
'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4',
'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6',
'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2',
'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3',
'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10',
'TP7', 'TP8', 'TP9'
]
EXPECTED_CH_NAMES = [
'T7', 'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5',
'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2',
'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3',
'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4',
'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4',
'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8',
'C6', 'FT8'
]
assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD)
montage = read_dig_captrak(
fname=op.join(data_path, 'montage', 'captrak_coords.bvct')
)
assert montage.ch_names == EXPECTED_CH_NAMES
assert repr(montage) == (
'<DigMontage | '
'0 extras (headshape), 0 HPIs, 3 fiducials, 66 channels>'
)
montage = transform_to_head(montage) # transform_to_head has to be tested
_check_roundtrip(montage=montage, fname=str(tmpdir.join('bvct_test.fif')))
fid, _ = _get_fid_coords(montage.dig)
assert_allclose(
actual=np.array([fid.nasion, fid.lpa, fid.rpa]),
desired=[[0, 0.11309, 0], [-0.09189, 0, 0], [0.09240, 0, 0]],
atol=1e-5,
)
raw_bv = read_raw_brainvision(bv_raw_fname)
raw_bv.set_channel_types({"HEOG": 'eog', "VEOG": 'eog', "ECG": 'ecg'})
raw_bv.set_montage(montage)
test_raw_bv = read_raw_fif(bv_fif_fname)
# compare after set_montage using chs loc.
for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']):
assert_allclose(actual['loc'][:3], expected['loc'][:3])
if actual['kind'] == FIFF.FIFFV_EEG_CH:
assert_allclose(actual['loc'][3:6],
[-0.005103, 0.05395, 0.144622], rtol=1e-04)
# https://gist.github.com/larsoner/2264fb5895070d29a8c9aa7c0dc0e8a6
_MGH60 = [
'Fz', 'F2', 'AF4', 'Fpz', 'Fp1', 'AF8', 'FT9', 'F7', 'FC5', 'FC6', 'FT7',
'F1', 'AF7', 'FT8', 'F6', 'F5', 'FC1', 'FC2', 'FT10', 'T9', 'Cz', 'F4',
'T7', 'C2', 'C4', 'C1', 'C3', 'F8', 'F3', 'C5', 'Fp2', 'AF3',
'CP2', 'P2', 'O2', 'Iz', 'Oz', 'PO4', 'O1', 'P8', 'PO8', 'P6', 'PO7', 'PO3', 'C6', 'TP9', 'TP8', 'CP4', 'P4', # noqa
'CP3', 'CP1', 'TP7', 'P3', 'Pz', 'P1', 'P7', 'P5', 'TP10', 'T8', 'T10',
]
@pytest.mark.parametrize('rename', ('raw', 'montage', 'custom'))
def test_set_montage_mgh(rename):
"""Test setting 'mgh60' montage to old fif."""
raw = read_raw_fif(fif_fname)
eeg_picks = pick_types(raw.info, meg=False, eeg=True, exclude=())
assert list(eeg_picks) == [ii for ii, name in enumerate(raw.ch_names)
if name.startswith('EEG')]
orig_pos = np.array([raw.info['chs'][pick]['loc'][:3]
for pick in eeg_picks])
atol = 1e-6
if rename == 'raw':
raw.rename_channels(lambda x: x.replace('EEG ', 'EEG'))
raw.set_montage('mgh60') # test loading with string argument
elif rename == 'montage':
mon = make_standard_montage('mgh60')
mon.rename_channels(lambda x: x.replace('EEG', 'EEG '))
assert [raw.ch_names[pick] for pick in eeg_picks] == mon.ch_names
raw.set_montage(mon)
else:
atol = 3e-3 # XXX old defs here apparently (maybe not realistic)?
assert rename == 'custom'
assert len(_MGH60) == 60
mon = make_standard_montage('standard_1020')
def renamer(x):
try:
return 'EEG %03d' % (_MGH60.index(x) + 1,)
except ValueError:
return x
mon.rename_channels(renamer)
raw.set_montage(mon)
new_pos = np.array([ch['loc'][:3] for ch in raw.info['chs']
if ch['ch_name'].startswith('EEG')])
assert ((orig_pos != new_pos).all())
r0 = _fit_sphere(new_pos)[1]
assert_allclose(r0, [0.000775, 0.006881, 0.047398], atol=1e-3)
# spot check
assert_allclose(new_pos[:2], [[0.000273, 0.084920, 0.105838],
[0.028822, 0.083529, 0.099164]], atol=atol)
# XXX: this does not check ch_names + it cannot work because of write_dig
def _check_roundtrip(montage, fname, coord_frame='head'):
"""Check roundtrip writing."""
montage.save(fname)
montage_read = read_dig_fif(fname=fname)
assert_equal(repr(montage), repr(montage_read))
assert_equal(_check_get_coord_frame(montage_read.dig), coord_frame)
assert_dig_allclose(montage, montage_read)
def _fake_montage(ch_names):
pos = np.random.RandomState(42).randn(len(ch_names), 3)
return make_dig_montage(ch_pos=dict(zip(ch_names, pos)),
coord_frame='head')
cnt_ignore_warns = [
pytest.mark.filterwarnings(
'ignore:.*Could not parse meas date from the header. Setting to None.'
),
pytest.mark.filterwarnings((
'ignore:.*Could not define the number of bytes automatically.'
' Defaulting to 2.')
),
]
def test_digmontage_constructor_errors():
"""Test proper error messaging."""
with pytest.raises(ValueError, match='does not match the number'):
_ = DigMontage(ch_names=['foo', 'bar'], dig=list())
def test_transform_to_head_and_compute_dev_head_t():
"""Test transform_to_head and compute_dev_head_t."""
EXPECTED_DEV_HEAD_T = \
[[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04],
[8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02],
[-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]
EXPECTED_FID_IN_POLHEMUS = {
'nasion': np.array([0.001393, 0.0131613, -0.0046967]),
'lpa': np.array([-0.0624997, -0.0737271, 0.07996]),
'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]),
}
EXPECTED_FID_IN_HEAD = {
'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]),
'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]),
'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]),
}
hpi_dev = np.array(
[[ 2.13951493e-02, 8.47444056e-02, -5.65431188e-02], # noqa
[ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02], # noqa
[ 1.05916829e-01, 8.18485672e-05, 1.19928083e-02], # noqa
[ 9.26595105e-02, 4.64804385e-02, 8.45141253e-03], # noqa
[ 9.42554419e-02, -4.35206589e-02, 8.78999363e-03]] # noqa
)
hpi_polhemus = np.array(
[[-0.0595004, -0.0704836, 0.075893 ], # noqa
[-0.0646373, 0.0838228, 0.0762123], # noqa
[-0.0135035, 0.0072522, -0.0268405], # noqa
[-0.0202967, -0.0351498, -0.0129305], # noqa
[-0.0277519, 0.0452628, -0.0222407]] # noqa
)
montage_polhemus = make_dig_montage(
**EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown'
)
montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg')
# Test regular workflow to get dev_head_t
montage = montage_polhemus + montage_meg
fids, _ = _get_fid_coords(montage.dig)
for kk in fids:
assert_allclose(fids[kk], EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5)
with pytest.raises(ValueError, match='set to head coordinate system'):
_ = compute_dev_head_t(montage)
montage = transform_to_head(montage)
fids, _ = _get_fid_coords(montage.dig)
for kk in fids:
assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5)
dev_head_t = compute_dev_head_t(montage)
assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=5e-7)
# Test errors when number of HPI points do not match
EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head'
with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
_ = compute_dev_head_t(transform_to_head(montage_polhemus))
EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head'
with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
_ = compute_dev_head_t(transform_to_head(
montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS)
))
EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head'
with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
_ = compute_dev_head_t(transform_to_head(
DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) +
montage_polhemus
))
def test_set_montage_with_mismatching_ch_names():
"""Test setting a DigMontage with mismatching ch_names."""
raw = read_raw_fif(fif_fname)
montage = make_standard_montage('mgh60')
# 'EEG 001' and 'EEG001' won't match
missing_err = '60 channel positions not present'
with pytest.raises(ValueError, match=missing_err):
raw.set_montage(montage)
montage.ch_names = [ # modify the names in place
name.replace('EEG', 'EEG ') for name in montage.ch_names
]
raw.set_montage(montage) # does not raise
# Case sensitivity
raw.rename_channels(lambda x: x.lower())
with pytest.raises(ValueError, match=missing_err):
raw.set_montage(montage)
# should work
raw.set_montage(montage, match_case=False)
raw.rename_channels(lambda x: x.upper()) # restore
assert 'EEG 001' in raw.ch_names and 'eeg 001' not in raw.ch_names
raw.rename_channels({'EEG 002': 'eeg 001'})
assert 'EEG 001' in raw.ch_names and 'eeg 001' in raw.ch_names
raw.set_channel_types({'eeg 001': 'misc'})
raw.set_montage(montage)
raw.set_channel_types({'eeg 001': 'eeg'})
with pytest.raises(ValueError, match='1 channel position not present'):
raw.set_montage(montage)
with pytest.raises(ValueError, match='match_case=False as 1 channel name'):
raw.set_montage(montage, match_case=False)
info = create_info(['EEG 001'], 1000., 'eeg')
mon = make_dig_montage({'EEG 001': np.zeros(3), 'eeg 001': np.zeros(3)},
nasion=[0, 1., 0], rpa=[1., 0, 0], lpa=[-1., 0, 0])
info.set_montage(mon)
with pytest.raises(ValueError, match='match_case=False as 1 montage name'):
info.set_montage(mon, match_case=False)
def test_set_montage_with_sub_super_set_of_ch_names():
"""Test info and montage ch_names matching criteria."""
N_CHANNELS = len('abcdef')
montage = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')
# montage and info match
info = create_info(ch_names=list('abcdef'), sfreq=1, ch_types='eeg')
info.set_montage(montage)
# montage is a SUPERset of info
info = create_info(list('abc'), sfreq=1, ch_types='eeg')
info.set_montage(montage)
assert len(info['dig']) == len(list('abc'))
# montage is a SUBset of info
_MSG = 'subset of info. There are 2 .* not present in the DigMontage'
info = create_info(ch_names=list('abcdfgh'), sfreq=1, ch_types='eeg')
with pytest.raises(ValueError, match=_MSG) as exc:
info.set_montage(montage)
# plus suggestions
assert exc.match('set_channel_types')
assert exc.match('on_missing')
def test_heterogeneous_ch_type():
"""Test ch_names matching criteria with heterogeneous ch_type."""
VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg')
montage = _make_toy_dig_montage(
n_channels=len(VALID_MONTAGE_NAMED_CHS),
coord_frame='head',
)
# Montage and info match
info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS))
RawArray(np.zeros((3, 1)), info, copy=None).set_montage(montage)
def test_set_montage_coord_frame_in_head_vs_unknown():
"""Test set montage using head and unknown only."""
N_CHANNELS, NaN = 3, np.nan
raw = _make_toy_raw(N_CHANNELS)
montage_in_head = _make_toy_dig_montage(N_CHANNELS, coord_frame='head')
montage_in_unknown = _make_toy_dig_montage(
N_CHANNELS, coord_frame='unknown'
)
montage_in_unknown_with_fid = _make_toy_dig_montage(
N_CHANNELS, coord_frame='unknown',
nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],
)
assert_allclose(
actual=np.array([ch['loc'] for ch in raw.info['chs']]),
desired=np.full((N_CHANNELS, 12), np.nan)
)
raw.set_montage(montage_in_head)
assert_allclose(
actual=np.array([ch['loc'] for ch in raw.info['chs']]),
desired=[
[0., 1., 2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
[3., 4., 5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
[6., 7., 8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
]
)
with pytest.warns(RuntimeWarning, match='assuming identity'):
raw.set_montage(montage_in_unknown)
raw.set_montage(montage_in_unknown_with_fid)
assert_allclose(
actual=np.array([ch['loc'] for ch in raw.info['chs']]),
desired=[
[-0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
[-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
[-6., 7., -8., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
]
)
# check no collateral effects from transforming montage
assert _check_get_coord_frame(montage_in_unknown_with_fid.dig) == 'unknown'
assert_array_equal(
_get_dig_montage_pos(montage_in_unknown_with_fid),
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
)
def test_set_montage_with_missing_coordinates():
"""Test set montage with missing coordinates."""
N_CHANNELS, NaN = 3, np.nan
raw = _make_toy_raw(N_CHANNELS)
raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})
# don't include all the channels
ch_names = raw.ch_names[1:]
n_channels = len(ch_names)
ch_coords = np.arange(n_channels * 3).reshape(n_channels, 3)
montage_in_mri = make_dig_montage(
ch_pos=dict(zip(ch_names, ch_coords,)),
coord_frame='unknown',
nasion=[0, 1, 0], lpa=[1, 0, 0], rpa=[-1, 0, 0],
)
with pytest.raises(ValueError, match='DigMontage is '
'only a subset of info'):
raw.set_montage(montage_in_mri)
with pytest.raises(ValueError, match='Invalid value'):
raw.set_montage(montage_in_mri, on_missing='foo')
with pytest.raises(TypeError, match='must be an instance'):
raw.set_montage(montage_in_mri, on_missing=True)
with pytest.warns(RuntimeWarning, match='DigMontage is '
'only a subset of info'):
raw.set_montage(montage_in_mri, on_missing='warn')
raw.set_montage(montage_in_mri, on_missing='ignore')
assert_allclose(
actual=np.array([ch['loc'] for ch in raw.info['chs']]),
desired=[
[NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN],
[0., 1., -2., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
[-3., 4., -5., 0., 0., 0., NaN, NaN, NaN, NaN, NaN, NaN],
]
)
@testing.requires_testing_data
def test_get_montage():
"""Test get montage from Instance.
Test with standard montage and then loaded in montage.
"""
# 1. read in testing data and assert montage roundtrip
# for testing dataset: 'test_raw.fif'
raw = read_raw_fif(fif_fname)
raw = raw.rename_channels(lambda name: name.replace('EEG ', 'EEG'))
raw2 = raw.copy()
# get montage and then set montage and
# it should be the same
montage = raw.get_montage()
raw.set_montage(montage, on_missing='raise')
test_montage = raw.get_montage()
assert_object_equal(raw.info['chs'], raw2.info['chs'])
assert_dig_allclose(raw2.info, raw.info)
assert_object_equal(raw2.info['dig'], raw.info['dig'])
# the montage does not change
assert_object_equal(montage.dig, test_montage.dig)
# the montage should fulfill a roundtrip with make_dig_montage
test2_montage = make_dig_montage(**montage.get_positions())
assert_object_equal(test2_montage.dig, test_montage.dig)
# 2. now do a standard montage
montage = make_standard_montage('mgh60')
# set the montage; note renaming to make standard montage map
raw.set_montage(montage)
# get montage back and set it
# the channel locations should be the same
raw2 = raw.copy()
test_montage = raw.get_montage()
raw.set_montage(test_montage, on_missing='ignore')
# the montage should fulfill a roundtrip with make_dig_montage
test2_montage = make_dig_montage(**test_montage.get_positions())
assert_object_equal(test2_montage.dig, test_montage.dig)
# chs should not change
assert_object_equal(raw2.info['chs'], raw.info['chs'])
# dig order might be different after set_montage
assert montage.ch_names == test_montage.ch_names
# note that test_montage will have different coordinate frame
# compared to standard montage
assert_dig_allclose(raw2.info, raw.info)
assert_object_equal(raw2.info['dig'], raw.info['dig'])
# 3. if montage gets set to None
raw.set_montage(None)
assert raw.get_montage() is None
# 4. read in BV test dataset and make sure montage
# fulfills roundtrip on non-standard montage
dig_montage = read_dig_fif(fif_dig_montage_fname)
# Make a BrainVision file like the one the user would have had
# with testing dataset 'test.vhdr'
raw_bv = read_raw_brainvision(bv_fname, preload=True)
raw_bv_2 = raw_bv.copy()
# rename channels to make it have the full set
# of channels
mapping = dict()
for ii, ch_name in enumerate(raw_bv.ch_names):
mapping[ch_name] = 'EEG%03d' % (ii + 1,)
raw_bv.rename_channels(mapping)
for ii, ch_name in enumerate(raw_bv_2.ch_names):
mapping[ch_name] = 'EEG%03d' % (ii + 33,)
raw_bv_2.rename_channels(mapping)
raw_bv.add_channels([raw_bv_2])
for ch in raw_bv.info['chs']:
ch['kind'] = FIFF.FIFFV_EEG_CH
# Set the montage and roundtrip
raw_bv.set_montage(dig_montage)
raw_bv2 = raw_bv.copy()
# reset the montage
test_montage = raw_bv.get_montage()
raw_bv.set_montage(test_montage, on_missing='ignore')
# dig order might be different after set_montage
assert_object_equal(raw_bv2.info['dig'], raw_bv.info['dig'])
assert_dig_allclose(raw_bv2.info, raw_bv.info)
# if dig is not set in the info, then montage returns None
raw.info['dig'] = None
assert raw.get_montage() is None
# the montage should fulfill a roundtrip with make_dig_montage
test2_montage = make_dig_montage(**test_montage.get_positions())
assert_object_equal(test2_montage.dig, test_montage.dig)
def test_read_dig_hpts():
"""Test reading .hpts file (from MNE legacy)."""
fname = op.join(
op.dirname(_BRAINVISON_FILE), 'tests', 'data', 'test.hpts'
)
montage = read_dig_hpts(fname)
assert repr(montage) == (
'<DigMontage | '
'0 extras (headshape), 5 HPIs, 3 fiducials, 34 channels>'
)
def test_get_builtin_montages():
"""Test help function to obtain builtin montages."""
EXPECTED_NUM = 24
assert len(get_builtin_montages()) == EXPECTED_NUM
@testing.requires_testing_data
def test_plot_montage():
"""Test plotting montage."""
# gh-8025
montage = read_dig_captrak(bvct_dig_montage_fname)
montage.plot()
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
GustavePate/perfect_python_script | pps/demos/demo_matplot.py | 1 | 1062 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
logger = logging.getLogger(__name__)
def demo():
res = False
try:
logger.info("****** Matplot demo *******")
# get a date axis
x = pd.date_range('1/1/2015', periods=72, freq='H')
y = np.arange(72)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y %H:%M'))
# date x axis tuning: every 4 hours
# loc = mdates.HourLocator(interval=4)
# date x axis tuning: full auto (between x and y tick, be regular and go !)
loc = mdates.AutoDateLocator(minticks=4, maxticks=16, interval_multiples=True)
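        # editor's note: minticks/maxticks bound how many tick labels appear;
        # interval_multiples snaps ticks to round hour/day boundaries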
plt.gca().xaxis.set_major_locator(loc)
# plot
plt.plot(x, y)
plt.gcf().autofmt_xdate()
plt.show()
except Exception:
logger.exception("Matplot demo failed")
raise
else:
res = True
finally:
return res
| mit |
lightalchemist/ML-algorithms | dim_reduction/test_spectral_embedding.py | 1 | 1960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import spectral_embedding
import sys
sys.path.insert(0, '../cluster')
import kmeans
def test():
k = 2
X, y_true = make_moons(n_samples=500, random_state=0, noise=0.01)
Y = spectral_embedding.transform(X, k, n_neighbors=7, sigma=0.1)
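    # editor's note: row-normalizing the embedding (the Ng-Jordan-Weiss step)
    # puts every point on the unit sphere, so the k-means step below separates
    # points by direction rather than by eigenvector magnitude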
n = np.linalg.norm(Y, axis=1)
n = n.reshape(-1, 1)
Y = Y / n
# Apply K-Means to cluster Y
y_pred, _, _ = kmeans.kmeans(Y, k)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.scatter(np.arange(len(Y)), Y[:, 0])
ax.set_title("Eigenvector 1")
ax = fig.add_subplot(122)
ax.scatter(np.arange(len(Y)), Y[:, 1])
ax.set_title("Eigenvector 2")
# Plot the data
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X[y_true==0, 0], X[y_true==0, 1], c='b', alpha=0.5, label="Class 1")
ax.scatter(X[y_true==1, 0], X[y_true==1, 1], c='g', alpha=0.5, label="Class 2")
ax.set_title("Original data")
ax.legend()
# Plot the predictions
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X[y_pred==0, 0], X[y_pred==0, 1], c='r', alpha=0.5, label="Class 1")
ax.scatter(X[y_pred==1, 0], X[y_pred==1, 1], c='y', alpha=0.5, label="Class 2")
ax.set_title("Result of clustering")
ax.legend()
# Plot the transformed data
fig = plt.figure()
ax = fig.add_subplot(111)
idx_class0 = np.argwhere(y_true==0)
idx_class1 = np.argwhere(y_true==1)
ax.scatter(Y[idx_class0, 0], Y[idx_class0, 1], c='b', alpha=0.5, label="Class 1")
ax.scatter(Y[idx_class1, 0], Y[idx_class1, 1], c='g', alpha=0.5, label="Class 2")
ax.set_title("Original data after spectral embedding")
ax.legend()
print("Number in class 0: {}".format(np.sum(y_pred==0)))
print("Number in class 1: {}".format(np.sum(y_pred==1)))
plt.show()
if __name__ == '__main__':
test()
| mit |
ueshin/apache-spark | python/pyspark/pandas/data_type_ops/boolean_ops.py | 6 | 15029 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import cast, Any, Union
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes, pandas_on_spark_type
from pyspark.pandas.typedef.typehints import as_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import BooleanType, StringType
class BooleanOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type: BooleanType.
"""
@property
def pretty_name(self) -> str:
return "bools"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError(
"Addition can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, bool):
return left.__or__(right)
elif isinstance(right, numbers.Number):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left + right
else:
assert isinstance(right, IndexOpsMixin)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType):
return left.__or__(right)
else:
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left + right
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Subtraction can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left - right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left - right
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError(
"Multiplication can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, bool):
return left.__and__(right)
elif isinstance(right, numbers.Number):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left * right
else:
assert isinstance(right, IndexOpsMixin)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType):
return left.__and__(right)
else:
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left * right
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"True division can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left / right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left / right
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Floor division can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left // right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left // right
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Modulo can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left % right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left % right
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Exponentiation can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return left ** right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, right.spark.data_type)
return left ** right
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bool):
return left.__or__(right)
elif isinstance(right, numbers.Number):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right + left
else:
raise TypeError(
"Addition can not be applied to %s and the given type." % self.pretty_name
)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right - left
else:
raise TypeError(
"Subtraction can not be applied to %s and the given type." % self.pretty_name
)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bool):
return left.__and__(right)
elif isinstance(right, numbers.Number):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right * left
else:
raise TypeError(
"Multiplication can not be applied to %s and the given type." % self.pretty_name
)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right / left
else:
raise TypeError(
"True division can not be applied to %s and the given type." % self.pretty_name
)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right // left
else:
raise TypeError(
"Floor division can not be applied to %s and the given type." % self.pretty_name
)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right ** left
else:
raise TypeError(
"Exponentiation can not be applied to %s and the given type." % self.pretty_name
)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = left.spark.transform(lambda scol: scol.cast(as_spark_type(type(right))))
return right % left
else:
raise TypeError(
"Modulo can not be applied to %s and the given type." % self.pretty_name
)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.dtype, extension_dtypes):
return right.__and__(left)
else:
def and_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
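                # editor's note: SQL three-valued logic would yield NULL here;
                # plain (non-extension) pandas bools cannot hold NA, so NULL
                # results are coerced to False to match pandas semantics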
scol = left & right
return F.when(scol.isNull(), False).otherwise(scol)
return column_op(and_func)(left, right)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.dtype, extension_dtypes):
return right.__or__(left)
else:
def or_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column) and pd.isna(right):
return SF.lit(False)
else:
scol = left | SF.lit(right)
return F.when(left.isNull() | scol.isNull(), False).otherwise(scol)
return column_op(or_func)(left, right)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
if isinstance(dtype, extension_dtypes):
scol = F.when(
index_ops.spark.column.isNotNull(),
F.when(index_ops.spark.column, "True").otherwise("False"),
)
else:
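                # editor's note: plain string dtype renders missing booleans
                # as the literal "None", mirroring pandas' str(None) output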
null_str = str(None)
casted = F.when(index_ops.spark.column, "True").otherwise("False")
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
else:
return _as_other_type(index_ops, dtype, spark_type)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
return ~operand
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__lt__)(left, right)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__le__)(left, right)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__ge__)(left, right)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__gt__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
from pyspark.pandas.base import column_op
return cast(IndexOpsLike, column_op(Column.__invert__)(operand))
class BooleanExtensionOps(BooleanOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type BooleanType,
and dtype BooleanDtype.
"""
@property
def pretty_name(self) -> str:
return "booleans"
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
def and_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
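            # editor's note: the nullable BooleanDtype keeps SQL's
            # three-valued logic, so NULL propagates instead of being
            # coerced to False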
return left & right
return column_op(and_func)(left, right)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
def or_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
return left | right
return column_op(or_func)(left, right)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary - can not be applied to %s." % self.pretty_name)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("abs() can not be applied to %s." % self.pretty_name)
| apache-2.0 |
matthew-tucker/mne-python | examples/inverse/plot_dics_beamformer.py | 18 | 2905 | """
======================================
Compute DICS beamformer on evoked data
======================================
Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer from single
trial activity in a time-frequency window to estimate source time courses based
on evoked data.
The original reference for DICS is:
Gross et al. Dynamic imaging of coherent sources: Studying neural interactions
in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
"""
# Author: Roman Goj <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
from mne.time_frequency import compute_epochs_csd
from mne.beamformer import dics
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Read raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()
# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Computing the data and noise cross-spectral density matrices
# The time-frequency window was chosen on the basis of spectrograms from
# example time_frequency/plot_time_frequency.py
data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.04, tmax=0.15,
fmin=6, fmax=10)
noise_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=-0.11, tmax=0.0,
fmin=6, fmax=10)
evoked = epochs.average()
# Compute DICS spatial filter and estimate source time courses on evoked data
stc = dics(evoked, forward, noise_csd, data_csd)
plt.figure()
ts_show = -30 # show the 40 largest responses
plt.plot(1e3 * stc.times,
stc.data[np.argsort(stc.data.max(axis=1))[ts_show:]].T)
plt.xlabel('Time (ms)')
plt.ylabel('DICS value')
plt.title('DICS time course of the 30 largest sources.')
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.set_data_time_index(180)
brain.show_view('lateral')
# Uncomment to save image
# brain.save_image('DICS_map.png')
| bsd-3-clause |
manu3618/legendary-potato | src/legendary_potato/classifiers.py | 1 | 19414 | # coding: utf-8
"""
Classifiers.
Based on sklearn doc:
"http://scikit-learn.org/dev/developers/contributing.html\
#rolling-your-own-estimator"
"""
from itertools import product
import numpy as np
import pandas as pd
from scipy.optimize import LinearConstraint, minimize
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
from .methods import KernelMethod
def multiclass2one_vs_all(labels, first_class=1):
"""Transform multiclas label to 2 class labels
Params:
labels (array-like): list of labels
first_class: label considered as not the rest
Returns:
(list) list of labels containing only 1/-1
"""
if first_class not in labels:
first_class = labels[0]
return [1 if elt == first_class else -1 for elt in labels]
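

# Editor's illustrative sketch (an addition, not part of the original
# module): shows the one-vs-rest binarization done by multiclass2one_vs_all.
def _one_vs_all_example():
    """Return the binarized labels for a small hand-made example.

    >>> _one_vs_all_example()
    [1, -1, 1, -1]
    """
    return multiclass2one_vs_all(["a", "b", "a", "c"], first_class="a")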
class SVDD(BaseEstimator, ClassifierMixin, KernelMethod):
"""Implement Support Vector DataDescription
.. math::
\\begin{cases}
        min_{r, c, \\xi} & r^2 + C \\sum_t \\xi_t \\\\
        s.t & y_i ( \\| \\phi(x_i) - c \\|^2 - r^2 ) \\leq \\xi_i \\forall i \\\\
        & \\xi_i \\geq 0 \\forall i \\\\
\\end{cases}
"""
def __init__(self, kernel_matrix=None, kernel=None, C=1):
"""Initialize some parameters.
Those parameters may be overwritten by the fit() method.
"""
self.kernel_matrix = kernel_matrix # kernel matrix used for training
if kernel is None:
self.kernel = np.dot
else:
self.kernel = kernel
self.C = C
self.string_labels = False # are labels strings or int?
self.hypersphere_nb = 1
        self.trained_on_sample = True  # True: fit on raw samples; False: fit directly on a kernel matrix
def fit(self, X, y=None, C=None, kernel=None, is_kernel_matrix=False):
"""Fit the classifier.
Args:
X: training samples.
y: training labels. If None, consider all samples belongs to the
same class (labeled "1").
            C (numeric): constraint in the soft margin case. If None or zero,
                then fall back to the hard margin case.
kernel (fun): kernel method to use. (default: linear)
is_kernel_matrix (bool): if True, the input is treated as
a kernel matrix.
"""
# X, y = check_X_y(X, y) # TODO: add check method for X
self._classifier_checks(X, y, C, kernel, is_kernel_matrix)
if len(self.classes_) > 2 or (
len(self.classes_) == 2 and self.string_labels
):
# each class has its own hypersphere (one class vs rest)
self.hypersphere_nb = len(self.classes_)
self.individual_svdd = {}
for cl in self.classes_:
# TODO: multithread/asyncio
cl_svdd = SVDD(
kernel_matrix=self.kernel_matrix,
kernel=self.kernel,
C=self.C,
)
cl_y = [1 if elt == cl else -1 for elt in y]
cl_svdd.fit(X, cl_y, C, kernel, is_kernel_matrix)
self.individual_svdd[cl] = cl_svdd
self.y_ = y
self.alphas_ = np.array([0])
self.radius_ = 0
else:
# one hypersphere
self.y_ = np.sign(y)
self.radius_, self.alphas_ = self._fit_one_hypersphere()
return self
def predict(self, X, decision_radius=1):
"""Predict classes
Args:
X (array like): list of test samples.
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere
                whose radius is multiplied by this factor.
"""
check_is_fitted(self, ["X_", "alphas_"])
# X = check_array(X)
if self.hypersphere_nb == 1:
return self._predict_one_hypersphere(X, decision_radius)
else:
# check class
dist_classes = self.relative_dist_all_centers(X)
return np.array(dist_classes.idxmin(axis=1))
def fit_predict(self, X, y, C=None, kernel=None, is_kernel_matrix=False):
"""Fit as the fit() methods.
Returns:
(array) : class for each training sample.
"""
self.fit(X, y, C, kernel, is_kernel_matrix)
        return self.predict(X)
def _predict_one_hypersphere(self, X=None, decision_radius=1):
"""Compute results for one hypersphere
Args:
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere whose
            radius is multiplied by this factor.
Returns:
(np.array)
"""
pred = self._dist_center(X) * decision_radius / self.radius_ - 1
ret = np.sign(pred).reshape(-1)
return list(map(lambda x: 1 if x == 0 else x, ret))
def decision_function(self, X):
"""Generic decision value.
Args:
X (array-like): list of sample
"""
return self._dist_center(X) / self.radius_
def _dist_center(self, X=None):
"""Compute ditance to class center.
Args:
X (array-like): list of input vectors. If None, use the train set.
Distance to center:
.. math::
\\| z - c \\|^2 = \\|z\\|^2 - 2 K(z, c) + \\|c\\|^2
c = \\sum_t \\alpha_t \\phi(X_t)
"""
if not self.hypersphere_nb == 1:
raise RuntimeWarning("Not available for multiclass SVDD")
check_is_fitted(self, ["X_", "alphas_"])
dim = len(self.alphas_)
if X is None:
# return distances for training set
square_dists = [
self.kernel_matrix[i, i]
- 2
* sum(
self.alphas_[t] * self.kernel_matrix[i, t]
for t in range(dim)
)
+ sum(
self.alphas_[t]
* self.alphas_[s]
* self.kernel_matrix[s, t]
for s in range(dim)
for t in range(dim)
)
for i in range(dim)
]
else:
# return distances for vector X
square_dists = [
self.kernel(z, z)
- 2
* sum(
self.alphas_[t] * self.kernel(self.X_[t], z)
for t in range(dim)
)
+ sum(
self.alphas_[s]
* self.alphas_[t]
* self.kernel(self.X_[t], self.X_[s])
for s in range(dim)
for t in range(dim)
)
for z in X
]
return np.sqrt(square_dists)
def _fit_one_hypersphere(self, y=None, class1=1, class2=-1):
"""Perform actual fit process
* compute alphas
* compute support vectors
* recompute minimal kernel matrix
"""
if y is None:
y = self.y_
dim = len(self.X_)
alphas = [1 / dim] * dim
C = self.C
upper = C * np.ones(dim)
one = np.array([1])
# TODO: test other solver
# https://pypi.org/project/quadprog/
# http://cvxopt.org/r
def ell_d(al):
"""Dual function to minimize.
function to maximize:
.. maths::
L_D = \\alpha diag(K)^T - \\alpha K \\alpha^T
L_D = \\sum_s \\alpha_s K<x_s, x_s>
- \\sum_s \\sum_t \\alpha_s \\alpha_t K(x_s, x_t)
"""
ay = al * y
return -(
np.mat(ay).dot(np.diag(self.kernel_matrix))
- np.mat(ay).dot(self.kernel_matrix).dot(np.mat(ay).T)
)
cons = [
# \forall i 0 \leq \alpha[i] \leq C
LinearConstraint(A=np.identity(dim), lb=np.zeros(dim), ub=upper),
# \sum_i \alpha[i] = 1
LinearConstraint(A=np.ones(dim), lb=one, ub=one),
]
# TODO: asyncio
predicted_alphas = minimize(
ell_d, alphas, constraints=cons, options={"maxiter": 10000}
)
if not predicted_alphas.success:
raise RuntimeError(predicted_alphas.message)
alphas = predicted_alphas.x
# nullify almost null alphas:
alphas = list(map(lambda x: 0 if np.isclose(x, 0) else x, alphas))
# support vectors: 0 < alphas <= C
support_vectors = set.intersection(
set(np.where(np.less_equal(alphas, C))[0]),
set(np.nonzero(alphas)[0]),
)
self.support_vectors_ = self.support_vectors_.union(support_vectors)
if len(self.support_vectors_) < 2:
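            # editor's note: degenerate case -- with fewer than two support
            # vectors, fall back to the smallest pairwise distance; adding C
            # on the diagonal keeps the zero self-distances out of the min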
radius = np.min(
self.distance_matrix() + np.diag([C for _ in range(dim)])
)
else:
# mean distance to support vectors
radius = np.mean(
[
self.dist_center_training_sample(r, alphas)
for r in self.support_vectors_
]
)
return radius, np.array(alphas)
def dist_all_centers(self, X=None):
"""Return distance to each class center.
"""
if self.hypersphere_nb > 1:
dist_classes = {
cl: svdd._dist_center(X)
for cl, svdd in self.individual_svdd.items()
}
else:
dist_classes = {1: self._dist_center(X)}
return pd.DataFrame(dist_classes)
def relative_dist_all_centers(self, X=None):
"""Distane to all centers divided by class radius.
"""
if self.hypersphere_nb > 1:
dist_classes = {
cl: svdd._dist_center(X) / svdd.radius_
for cl, svdd in self.individual_svdd.items()
}
else:
dist_classes = {1: self._dist_center(X) / self.radius_}
return pd.DataFrame(dist_classes)
def dist_center_training_sample(self, r, alphas=None, cl=None):
"""Distance from vector #r to center.
Args:
r (int): rank of the vector
alphas (array): list of alphas
cl : class whose center will be used.
"""
if cl is None:
cl = 1
        if alphas is None:
            if len(self.classes_) > 1:
                alphas = self.individual_svdd[cl].alphas_
            else:
                alphas = self.alphas_
K = self.kernel_matrix
n = K.shape[0]
# dist:
# K_(r, r)
# - 2 \sum_t \alpha_t \K_(r ,t)
# + \sum_s\sum_t \alpha_s \alpha_t K_(r, t)
return sum(
[
K[r, r],
-2 * sum(alphas[t] * K[r, t] for t in range(n)),
sum(
alphas[s] * alphas[t] * K[r, t]
for s, t in product(range(n), range(n))
),
]
)
def _center_one_class(self, mapping):
"""Compute hypersphere center.
Args:
mapping (fun): feature map. Default to identity (consistent with
default kernel function)
"""
check_is_fitted(self, ["X_", "alphas_"])
if not self.trained_on_sample:
raise RuntimeError("No access to initial vectors")
center = np.sum(
[
self.alphas_[i] * np.array(mapping(self.X_[i]))
for i in range(len(self.X_))
],
axis=0,
)
return center
def center(self, mapping=lambda x: x):
"""Compute center coordonates.
Args:
mapping (fun): feature map. Default to identity (consistent with
default kernel function)
"""
if self.hypersphere_nb > 1:
return {
cl: svdd._center_one_class(mapping)
for cl, svdd in self.individual_svdd.items()
}
else:
return {1: self._center_one_class(mapping)}
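

# Editor's hedged usage sketch (an addition, not part of the original
# module). It assumes `_classifier_checks` -- inherited from KernelMethod --
# stores the samples in `X_` and builds `kernel_matrix`, as the rest of the
# class expects.
def _svdd_usage_sketch():
    """Fit a soft-margin SVDD sphere around 2-D data and flag outliers."""
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), 5.0 + rng.randn(3, 2)])
    svdd = SVDD(C=0.5)
    svdd.fit(X, y=[1] * len(X))  # single-class labels -> one hypersphere
    return svdd.predict(X)  # +1 inside the learned hypersphere, -1 outside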
class SVM(BaseEstimator, ClassifierMixin, KernelMethod):
"""Implement Support Vector Machine
.. math::
\\begin{cases}
        min_{w, w_0, \\xi} & \\frac{1}{2} \\|w\\|^2 + C \\sum_t \\xi_t \\\\
        s.t & y_t ( K(w, x_t) + w_0 ) \\geq 1 - \\xi_t \\forall t \\\\
        & \\xi_t \\geq 0 \\forall t \\\\
\\end{cases}
"""
# TODO: multiclass: implement 1 set of alphas per class
# {class: [alphas], }
def __init__(self, kernel_matrix=None, kernel=None, C=1):
self.kernel_matrix = kernel_matrix # kernel matrix used for training
if kernel is None:
self.kernel = np.dot
else:
self.kernel = kernel
self.C = C
self.string_labels = False # are labels strings or int?
        self.trained_on_sample = True  # True: fit on raw samples; False: fit directly on a kernel matrix
def fit(self, X, y=None, C=None, kernel=None, is_kernel_matrix=False):
"""Fit the classifier.
Args:
X: training samples.
y: training labels. If None, consider all samples belongs to the
same class (labeled "1").
            C (numeric): constraint in the soft margin case. If None or zero,
                then fall back to the hard margin case.
kernel (fun): kernel method to use. (default: linear)
is_kernel_matrix (bool): if True, the input is treated as
a kernel matrix.
The optimisation problem is
.. math::
\\begin{cases}
        min_{\\alpha} & \\frac{1}{2} (\\alpha \\circ y)^T K (\\alpha \\circ y) - \\alpha^T ones \\\\
        s.t & 0 \\leq \\alpha_t \\leq C \\forall t \\\\
        & (0 \\leq I \\alpha \\leq C I) \\\\
        & \\alpha^T y = 0
\\end{cases}
The generic QP problem is
.. math::
\\begin{cases}
min_x & \\frac{1}{2} x^T.P.x + q^T.x \\\\
s.t & G.x \\leq h \\\\
& A.x = b
\\end{cases}
In this SVM case:
.. math::
            G = \\begin{pmatrix}
            -1 & & \\\\
            & \\ddots & \\\\
            & & -1 \\\\
            1 & & \\\\
            & \\ddots & \\\\
            & & 1
            \\end{pmatrix}
.. math::
h = \\begin{pmatrix}
0 \\\\ \\vdots \\\\ 0 \\\\ C \\\\ \\vdots \\\\ C
\\end{pmatrix}
.. math::
A = \\begin{pmatrix}
y_1 & \\hdots & y_n
\\end{pmatrix}
.. math::
b = 0
"""
self._classifier_checks(X, y, C, kernel, is_kernel_matrix)
self.y_ = y
if len(self.classes_) > 2 or isinstance(y[0], str):
self.individual_svm = {}
for label in self.classes_:
self.individual_svm[label] = self._fit_two_classes(
X,
multiclass2one_vs_all(y, label),
C,
kernel,
is_kernel_matrix,
)
return self.individual_svm[self.y_[0]]
self.y_ = multiclass2one_vs_all(y)
if len(set(self.y_)) < 2:
# One class SVM
# TODO
raise NotImplementedError("One class SVM not implemeted.")
return self._fit_two_classes(X, self.y_, C, kernel, is_kernel_matrix)
def _fit_two_classes(self, X, y, C, kernel, is_kernel_matrix):
# TODO test other solver
#
        # G = np.vstack([-1 * np.identity(dim), np.identity(dim)])  # (2n, n)
# h = np.matrix(
# np.hstack([np.zeros(dim), C * np.ones(dim)])
# ).transpose()
# A = np.matrix(self.y)
C = self.C
dim = len(self.X_)
alphas = [1 / dim] * dim # warm start
def ell_d(x):
"""Function to minimize.
"""
x = np.array(x).transpose()
return 1 / 2 * float(
x.transpose().dot(self.kernel_matrix).dot(x)
) - float(np.ones(dim).dot(x))
cons = [
LinearConstraint(
A=np.identity(dim), lb=np.zeros(dim), ub=C * np.ones(dim)
),
LinearConstraint(A=y, lb=np.array([0]), ub=np.array([0])),
]
predicted_alphas = minimize(
ell_d, alphas, constraints=cons, options={"maxiter": 10000}
)
if not predicted_alphas.success:
raise RuntimeError(predicted_alphas.message)
alphas = predicted_alphas.x
# nullify almost null alphas:
alphas = list(map(lambda x: 0 if np.isclose(x, 0) else x, alphas))
self.alphas_ = alphas
# support vectors: 0 < alphas <= C
support_vectors = set.intersection(
set(np.where(np.less_equal(alphas, C))[0]),
set(np.nonzero(alphas)[0]),
)
self.support_vectors_ = self.support_vectors_.union(support_vectors)
w0 = np.mean(
[
1
- np.sum(
[
y[t] * alphas[t] * self.kernel_matrix[i, t]
for t in range(dim)
]
)
for i in support_vectors
]
)
self.w0 = w0
return {"alphas": np.array(alphas), "w0": w0}
def predict(self, X=None, decision_value=0):
"""Predict classes
Args:
X (array like): list of test samples.
            decision_value (numeric): decision threshold (currently unused
                by this method).
"""
check_is_fitted(self, ["X_", "alphas_"])
if X is None:
X = self.X_
if hasattr(self, "individual_svm"):
            # multiclass
results = self._dist_hyperplane_multiclass(X)
return results.transpose().idxmax()
else:
return [np.sign(self._dist_hyperplane(x)) for x in X]
def _dist_hyperplane(self, x, label=None):
if label is None:
# One hyperplane
check_is_fitted(self, ["X_", "alphas_"])
alphas = self.alphas_
y = self.y_
w0 = self.w0
else:
params = self.individual_svm[label]
alphas = params["alphas"]
w0 = params["w0"]
y = multiclass2one_vs_all(self.y_, label)
return (
sum(
[
alphas[i] * y[i] * self.kernel(x, self.X_[i])
for i in range(len(alphas))
]
)
+ w0
)
def _dist_hyperplane_multiclass(self, X):
res = {
lab: [self._dist_hyperplane(x, lab) for x in X]
for lab, res in self.individual_svm.items()
}
return pd.DataFrame(res)
def fit_predict(self, X, y, C=1, kernel=None, is_kernel_matrix=False):
"""Fit as the fit() methods.
Returns:
(array) : class for each training sample.
"""
self.fit(X, y, C, kernel, is_kernel_matrix)
        return self.predict(X)
def decision_function(self, X):
"""Generic decision value.
Args:
X (array-like): list of samples
"""
return [self._dist_hyperplane(s) for s in X]
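

# Editor's hedged usage sketch (an addition, not part of the original
# module). It exercises the two-class code path with the usual +1/-1 labels
# and the default linear kernel.
def _svm_usage_sketch():
    """Fit a soft-margin SVM on two well-separated Gaussian blobs."""
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(10, 2) - 2.0, rng.randn(10, 2) + 2.0])
    y = [-1] * 10 + [1] * 10
    svm = SVM(C=1)
    svm.fit(X, y)
    return svm.predict(X)  # list of +1/-1 predictions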
| mit |
SCECcode/BBP | bbp/utils/batch/combine_dist_gof_gen.py | 1 | 9736 | #!/usr/bin/env python
"""
Copyright 2010-2017 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This program creates a distance-based GOF plot, combining information from
all realizations into a single plot.
"""
# Import Python modules
import os
import glob
import optparse
import matplotlib
if (matplotlib.get_backend() != 'agg'):
matplotlib.use('Agg') # Disables use of Tk/X11
#import matplotlib.colors as mcolors
#import matplotlib.cm as cm
#from matplotlib.ticker import FormatStrFormatter
import pylab
import numpy
# Import Broadband modules
import plot_config
import bband_utils
# Constants
MIN_Y_AXIS = -1.75
MAX_Y_AXIS = 1.75
DIST_PERIODS = [0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0]
# --------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------
def combine_realization_data(tmpdir, period):
"""
This function reads the resid files from all realizations and
returns the combined data set
"""
data = {}
realizations = sorted(os.listdir(tmpdir))
for realization in realizations:
basedir = os.path.join(tmpdir, realization)
resid_file = glob.glob("%s%s*-resid-%.3f-rotd50.txt" %
(basedir, os.sep, period))
if len(resid_file) != 1:
raise bband_utils.ProcessingError("Residuals file not found for "
"realization %s!" % (realization))
resid_file = resid_file[0]
input_file = open(resid_file, 'r')
for line in input_file:
line = line.strip()
# Skip comments and empty lines
if line.startswith("#") or line.startswith("%") or not line:
continue
pieces = line.split()
# Make sure we have enough tokens
if len(pieces) != 2:
continue
# Convert to floats
pieces = [float(piece) for piece in pieces]
dist = pieces[0]
val = pieces[1]
if dist in data:
data[dist].append(val)
else:
data[dist] = [val]
input_file.close()
# Ok, processed all realizations, now combine the data
sta_dist_data = []
sta_min_data = []
sta_max_data = []
sta_resid_data = []
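    # editor's note: the error bars built below are symmetric -- one standard
    # deviation of the residuals above and below the mean at each distance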
for item in data:
sta_dist_data.append(item)
sta_min_data.append(numpy.std(data[item]))
sta_max_data.append(numpy.std(data[item]))
sta_resid_data.append(numpy.mean(data[item]))
# Return the data we found
return sta_dist_data, sta_min_data, sta_max_data, sta_resid_data
def plot_combined_dist_gof(indir, outdir, codebase):
"""
This function reads data from the residuals files from multiple
realizations and plots a dist gof plot with a number of periods
"""
# Capture number of realizations and event label
num_realizations = len(os.listdir(indir))
basedir = os.path.join(indir, os.listdir(indir)[0])
resid_file = glob.glob("%s%s*-resid-%.3f-rotd50.txt" %
(basedir, os.sep, DIST_PERIODS[0]))[0]
event_label = os.path.basename(resid_file).split("-")[0]
# Collect all the data from the residuals file
all_sta_dist_data = []
all_sta_min_data = []
all_sta_max_data = []
all_sta_resid_data = []
for period in DIST_PERIODS:
(sta_dist_data,
sta_min_data,
sta_max_data,
sta_resid_data) = combine_realization_data(indir, period)
all_sta_dist_data.append(sta_dist_data)
all_sta_min_data.append(sta_min_data)
all_sta_max_data.append(sta_max_data)
all_sta_resid_data.append(sta_resid_data)
# Now create the dist GOFs
outfile = os.path.join(outdir, "gof-dist-linear-combined-%s-%s-rotd50.png" %
(codebase, event_label))
create_combined_dist_gof(all_sta_resid_data, all_sta_dist_data,
all_sta_min_data, all_sta_max_data,
event_label, num_realizations, codebase, outfile)
outfile = os.path.join(outdir, "gof-dist-log-combined-%s-%s-rotd50.png" %
(codebase, event_label))
create_combined_dist_gof(all_sta_resid_data, all_sta_dist_data,
all_sta_min_data, all_sta_max_data,
event_label, num_realizations, codebase, outfile,
log_scale=True)
def create_combined_dist_gof(all_sta_resid_data, all_sta_dist_data,
all_sta_min_data, all_sta_max_data,
event_label, num_realizations, codebase, outfile,
log_scale=False):
"""
Creates a combined gof dist plot for all the data and distances
provided
"""
plottitle = ("GOF Comparison for %s\n%d Realizations\n%s Method" %
(event_label, num_realizations, codebase.upper()))
# Create figure
num_plots = len(DIST_PERIODS)
if len(DIST_PERIODS) % 2:
num_plots = num_plots + 1
num_columns = num_plots / 2
fig, axs = pylab.plt.subplots(2, num_columns)
fig.set_size_inches(18, 8)
fig.subplots_adjust(left = 0.05, right = 0.95, top = 0.86, bottom = 0.06)
# Find max, min values for x_axis
if log_scale:
min_x = 1
else:
min_x = 0
max_x = 0
for dist in all_sta_dist_data:
# Check if not empty
if len(dist):
max_x = max(max_x, max(dist))
# If no data, set it to 90 (will get rounded to 100)
if max_x == 0:
max_x = 90
# Round to the next 10'
max_x = max_x + (10 - (max_x % 10))
if log_scale and max_x > 100:
# Round to the next 100'
max_x = max_x + (100 - (max_x % 100))
# y-axis is fixed
min_y = MIN_Y_AXIS
max_y = MAX_Y_AXIS
    # Flatten the 2 x num_columns grid of axes into a flat list
subfigs = []
for y_subplot in range(0, 2):
for x_subplot in range(0, num_columns):
subfigs.append(axs[y_subplot, x_subplot])
# Good, now walk through each subfig
for (subfig, sta_dist_data,
sta_min_data, sta_max_data,
sta_resid_data, period) in zip(subfigs, all_sta_dist_data,
all_sta_min_data, all_sta_max_data,
all_sta_resid_data, DIST_PERIODS):
# sta_min_data = [abs(x - y) for x, y in zip(sta_resid_data,
# sta_min_data)]
# sta_max_data = [abs(x - y) for x, y in zip(sta_resid_data,
# sta_max_data)]
subfig.set_xlim(min_x, max_x)
subfig.set_ylim(min_y, max_y)
subfig.set_title("Period = %.3f s" % (period), size=8)
if DIST_PERIODS.index(period) % num_columns == 0:
subfig.set_ylabel("ln (data/model)", size=8)
subfig.tick_params(labelsize=8)
# subfig.plot(sta_dist_data, sta_resid_data, 'o', color='black',
# label='_nolegend_')
subfig.errorbar(sta_dist_data, sta_resid_data,
yerr=[sta_min_data, sta_max_data],
fmt='o', color='black', ecolor='grey',
label='_nolegend_')
subfig.plot([min_x, max_x], [0.0, 0.0],
color='grey', label='_nolegend_')
if log_scale:
subfig.set_xscale('log')
# Only add label to last row
if DIST_PERIODS.index(period) >= (2 * num_columns) / 2:
subfig.set_xlabel("Distance (km)", size=8)
fig.suptitle('%s' % (plottitle), size=12)
print "Saving dist GoF plot to %s" % (outfile)
fig.savefig(outfile, format="png", transparent=False, dpi=plot_config.dpi)
# --------------------------------------------------------------------------
# Main
# --------------------------------------------------------------------------
PARSER = optparse.OptionParser()
PARSER.add_option("-d", "--dir", dest="input_dir",
help="Input directory containing simulation results")
PARSER.add_option("-o", "--output_dir", dest="output_dir",
help="Output file")
PARSER.add_option("-c", "--codebase", dest="codebase",
help="Method used for the simulation")
(OPTIONS, ARGS) = PARSER.parse_args()
if OPTIONS.input_dir is None:
PARSER.error("Please specify the input directory!")
TOP_INPUT_DIR = OPTIONS.input_dir
if not os.path.isdir(TOP_INPUT_DIR):
PARSER.error("Invalid input directory!")
if not "Sims" in os.listdir(TOP_INPUT_DIR):
PARSER.error("Please provide the top-level simulation directory!\n"
"This is the directory given to the cluster script")
INPUT_OUTDIR = os.path.join(TOP_INPUT_DIR, "Sims" , "outdata")
if OPTIONS.output_dir is None:
PARSER.error("error specify output directory!")
else:
OUTPUT_DIR = OPTIONS.output_dir
if not os.path.isdir(OUTPUT_DIR):
PARSER.error("Invalid output directory!")
if OPTIONS.codebase is None:
PARSER.error("Please specify codebase!")
# Create combined dist gof plot
plot_combined_dist_gof(INPUT_OUTDIR, OUTPUT_DIR, OPTIONS.codebase)
# All done!
print "All Done!"
| apache-2.0 |
abhishekkrthakur/scikit-learn | sklearn/metrics/pairwise.py | 10 | 41636 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
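# A minimal, self-contained sketch (not part of the public API; the helper
# name, shapes and seed below are illustrative assumptions) checking the
# expansion used above, dist(x, y)^2 = <x, x> - 2 <x, y> + <y, y>, against a
# direct computation.
def _demo_euclidean_expansion():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    # Expanded form: ||x||^2 - 2 <x, y> + ||y||^2, broadcast over all pairs
    XX = (X ** 2).sum(axis=1)[:, np.newaxis]
    YY = (Y ** 2).sum(axis=1)[np.newaxis, :]
    D2 = XX - 2 * np.dot(X, Y.T) + YY
    # Direct form: sum of squared componentwise differences
    direct = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1)
    assert np.allclose(D2, direct)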
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
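# Illustrative sketch (the helper name and array sizes are assumptions):
# with a small batch_size the chunked search above should agree with the
# naive argmin/min over the full distance matrix.
def _demo_argmin_min_equivalence():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(7, 4)
    Y = rng.rand(11, 4)
    idx, vals = pairwise_distances_argmin_min(X, Y, batch_size=3)
    D = pairwise_distances(X, Y)
    assert np.array_equal(idx, D.argmin(axis=1))
    assert np.allclose(vals, D.min(axis=1))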
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
    Array containing points, shape (n_samples1, n_features).
Y : array-like
    Array containing points, shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared euclidean
distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
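# A small sketch of the note above (the helper name, seed and shapes are
# illustrative): on unit-normalized rows the cosine distance equals half the
# squared euclidean distance.
def _demo_cosine_half_squared_euclidean():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    Y = rng.rand(6, 3)
    d_cos = paired_cosine_distances(X, Y)
    d_euc2 = paired_euclidean_distances(normalize(X), normalize(Y)) ** 2
    assert np.allclose(d_cos, .5 * d_euc2)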
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
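# Sketch relating the in-place computation above to the textbook form
# K(X, Y) = (gamma <X, Y> + coef0)^degree; the helper name and parameter
# values are illustrative assumptions.
def _demo_polynomial_kernel():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(4, 5)
    K = polynomial_kernel(X, degree=2, gamma=0.5, coef0=1)
    manual = (0.5 * np.dot(X, X.T) + 1) ** 2
    assert np.allclose(K, manual)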
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
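# Illustrative sketch (the helper name and gamma value are assumptions): the
# RBF kernel above is an elementwise exponential of the squared euclidean
# distances.
def _demo_rbf_kernel():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = rbf_kernel(X, gamma=0.7)
    D2 = euclidean_distances(X, squared=True)
    assert np.allclose(K, np.exp(-0.7 * D2))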
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
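# Sketch of the note above (the helper name and shapes are illustrative):
# after L2 normalization, cosine similarity coincides with the linear kernel.
def _demo_cosine_is_linear_on_normalized():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))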
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
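# Minimal sketch of the slicing scheme above (the helper name and the choice
# of two slices are illustrative): column blocks computed independently
# recompose into the full matrix via hstack.
def _demo_even_slices():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(3, 2)
    Y = rng.rand(5, 2)
    full = euclidean_distances(X, Y)
    parts = [euclidean_distances(X, Y[s])
             for s in gen_even_slices(Y.shape[0], 2)]
    assert np.allclose(np.hstack(parts), full)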
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/naive_bayes.py | 128 | 28358 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
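# Illustrative sketch (the helper name and toy data are assumptions): because
# predict_log_proba subtracts the log-sum-exp of the joint log likelihood,
# the exponentiated rows form proper probability distributions.
def _demo_log_proba_normalizes():
    import numpy as np
    X = np.array([[-1., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2])
    clf = GaussianNB().fit(X, y)
    assert np.allclose(np.exp(clf.predict_log_proba(X)).sum(axis=1), 1.0)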
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like, shape (n_samples, n_features)
    New data points to incorporate into the running statistics.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
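# Sketch (the helper name, shapes and seed are illustrative) checking that
# the incremental update in _update_mean_variance reproduces the batch mean
# and variance of the concatenated data, per the Chan/Golub/LeVeque formulas.
def _demo_online_mean_variance():
    import numpy as np
    rng = np.random.RandomState(0)
    A = rng.rand(10, 3)
    B = rng.rand(4, 3)
    mu, var = GaussianNB._update_mean_variance(
        A.shape[0], A.mean(axis=0), A.var(axis=0), B)
    full = np.vstack([A, B])
    assert np.allclose(mu, full.mean(axis=0))
    assert np.allclose(var, full.var(axis=0))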
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
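# Illustrative sketch (the helper name and toy counts are assumptions) of the
# Lidstone smoothing in _update_feature_log_prob: with alpha added to every
# count, each class row of exp(feature_log_prob_) is a proper distribution.
def _demo_multinomial_smoothing():
    import numpy as np
    X = np.array([[2, 1, 0], [0, 3, 1]])
    y = np.array([0, 1])
    clf = MultinomialNB(alpha=1.0).fit(X, y)
    probs = np.exp(clf.feature_log_prob_)
    assert np.allclose(probs.sum(axis=1), 1.0)
    # Class 0 saw counts [2, 1, 0]; smoothed: (count + 1) / (3 + 3)
    assert np.allclose(probs[0], (np.array([2., 1., 0.]) + 1.0) / 6.0)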
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
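# Standalone sketch (not from the upstream module): a numpy check of the
# algebraic shortcut used in _joint_log_likelihood above.  For binary x and
# per-class Bernoulli parameters p, the joint log likelihood
#   log P(c) + sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
# rearranges to  X . (log p - log(1-p)).T + log P(c) + sum_i log(1 - p_i),
# which is exactly the `jll` expression computed above.  All values below
# are made up for illustration.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    Xd = rng.randint(2, size=(5, 4)).astype(float)  # 5 samples, 4 binary features
    p = rng.uniform(0.1, 0.9, size=(3, 4))          # 3 classes
    log_prior = np.log(np.full(3, 1.0 / 3.0))
    log_p, log_q = np.log(p), np.log(1.0 - p)
    # naive double-loop form of the joint log likelihood
    naive = np.array([[log_prior[c] + np.sum(Xd[i] * log_p[c]
                                             + (1 - Xd[i]) * log_q[c])
                       for c in range(3)] for i in range(5)])
    # vectorised form matching the estimator code
    fast = Xd.dot((log_p - log_q).T) + (log_prior + log_q.sum(axis=1))
    assert np.allclose(naive, fast)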
| bsd-3-clause |
ihc/futhark | tools/benchmark-performance-plot.py | 3 | 3759 | #!/usr/bin/env python
import numpy as np
import sys
import json
import re
import subprocess
import datetime
import matplotlib
matplotlib.use('Agg') # For headless use
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import os
_, results_dir, machine, benchmark, plotfile = sys.argv
def compute_commit_timestamps():
log = subprocess.check_output(['git', 'log', '--pretty=format:%H %at'])
result = {}
for line in log.split('\n'):
commit,timestamp = line.split(' ')
result[commit] = int(timestamp)
return result
def remove_nones(l):
return filter(lambda x: x is not None, l)
commit_timestamps = compute_commit_timestamps()
def cut_desc(s):
if s[0] == '#':
return s.split(' ')[0]
else:
return s
def extract_result(filename):
    match = re.search(r'^{}-([a-f0-9]+)\.json$'.format(machine), filename)
if match:
commit=match.group(1)
results = json.load(open(results_dir + '/' + filename))
try:
benchmark_results = results['futhark-benchmarks/'+benchmark]
def get_runtime(r):
for dataset in r:
if type(r[dataset]) is dict:
return np.mean(r[dataset]['runtimes'])
runtimes={}
for dataset in benchmark_results['datasets']:
if type(benchmark_results['datasets'][dataset]) is dict:
runtimes[cut_desc(dataset)] = np.mean(benchmark_results['datasets'][dataset]['runtimes'])
return {'timestamp': commit_timestamps[commit],
'commit': commit,
'runtimes': runtimes}
except:
return None
results = remove_nones(map(extract_result, os.listdir(results_dir)))
results.sort(key=lambda x: x['timestamp'])
if len(results) == 0:
sys.exit('No results found for benchmark {}.'.format(benchmark))
best = {}
def from_unixtime(timestamp):
return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
for r in results:
time = from_unixtime(r['timestamp'])
for dataset in r['runtimes']:
if dataset not in best or r['runtimes'][dataset] < best[dataset]['runtime']:
best[dataset] = {'runtime': r['runtimes'][dataset],
'timestamp': r['timestamp'],
'commit': r['commit'] }
print r['commit'], dataset, time, r['runtimes'][dataset]
for dataset in sorted(best.keys()):
best_time = from_unixtime(best[dataset]['timestamp'])
print 'Dataset {} best: {} {} {}'.format(dataset, best_time, best[dataset]['commit'], best[dataset]['runtime'])
def make_xticks(results):
times = np.array(map(lambda x: from_unixtime(x['timestamp']), results))
return times[np.arange(0,len(times),len(times)/10)]
fig, ax = plt.subplots()
ax.set_title(benchmark)
ax.set_ylabel('Slowdown compared to fastest')
for dataset in sorted(best.keys()):
best_runtime=best[dataset]['runtime']
xs=[]
ys=[]
i = 0
for r in results:
if dataset in r['runtimes']:
xs += [i]
ys += [r['runtimes'][dataset]/best_runtime]
i += 1
ax.plot(xs, ys, label=dataset)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
grey='#aaaaaa'
xticks=make_xticks(results)
ax.set_yscale('log')
ax.set_ylim(ymin=0.9,ymax=3)
ax.yaxis.grid(color=grey,zorder=0)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.yaxis.set_minor_formatter(FormatStrFormatter(''))
ax.set_yticks(np.arange(0.9,3,0.1))
ax.set_xticks(1+np.arange(len(xticks))*(len(results)/10))
ax.set_xticklabels(xticks, rotation=-45)
plt.rc('text')
plt.savefig(plotfile, bbox_inches='tight')
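# Standalone sketch (not used by the script): how the filename pattern in
# extract_result pairs result files with a machine and commit.  The machine
# name and hashes here are invented for illustration.
def _demo_result_filename_regex():
    demo_machine = 'skylake'
    out = []
    for fname in ['skylake-ab12cd.json', 'other-ab12cd.json',
                  'skylake-ab12cd.json.bak']:
        m = re.search(r'^{}-([a-f0-9]+)\.json$'.format(demo_machine), fname)
        out.append((fname, m.group(1) if m else None))
    # expected: [('skylake-ab12cd.json', 'ab12cd'),
    #            ('other-ab12cd.json', None),
    #            ('skylake-ab12cd.json.bak', None)]
    return out
# A hypothetical invocation (arguments are positional, cf. sys.argv above):
#   python benchmark-performance-plot.py results/ skylake rodinia/nn/nn.fut nn.png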
| isc |
GrimDerp/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
 h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
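# Standalone sketch (not part of numpy): the cache discipline used by
# _raw_fft, isolated on a plain dict.  list.pop() and list.append() are
# atomic under the GIL, so a thread either takes a wsave nobody else holds
# or initialises a fresh one, and publishes it back only once it is done.
if __name__ == "__main__":
    _demo_cache = {}
    def _checkout(n, init):
        try:
            return _demo_cache.setdefault(n, []).pop()
        except IndexError:
            return init(n)
    def _checkin(n, wsave):
        _demo_cache[n].append(wsave)      # publish only after use
    w = _checkout(16, lambda n: [0.0] * n)   # stand-in for fftpack.cffti
    _checkin(16, w)
    assert _demo_cache[16] == [w]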
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
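# Standalone sketch (not from the upstream module): with norm="ortho" both
# directions are scaled by 1/sqrt(n), making the transform pair unitary, so
# Parseval's theorem holds exactly for the coefficients.
if __name__ == "__main__":
    import numpy as np
    sig = np.random.randn(128)
    coef = np.fft.fft(sig, norm="ortho")
    assert np.allclose(np.sum(np.abs(sig) ** 2), np.sum(np.abs(coef) ** 2))
    assert np.allclose(np.fft.ifft(coef, norm="ortho"), sig)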
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
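# Standalone sketch (not part of numpy): the Fourier-interpolation resampling
# mentioned in the irfft Notes above.  A band-limited signal (a single
# harmonic) is upsampled exactly; the m/n factor compensates for irfft's
# 1/n scaling.
if __name__ == "__main__":
    import numpy as np
    n_src, m_dst = 32, 64
    t_src = np.arange(n_src) / float(n_src)
    t_dst = np.arange(m_dst) / float(m_dst)
    sig = np.cos(2 * np.pi * 3 * t_src)       # band-limited: one harmonic
    resamp = np.fft.irfft(np.fft.rfft(sig), m_dst) * (float(m_dst) / n_src)
    assert np.allclose(resamp, np.cos(2 * np.pi * 3 * t_dst))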
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
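# Standalone sketch (not part of numpy): what _cook_nd_args normalises the
# (s, axes) pair to.  With neither argument given, every axis is transformed
# at its own length; with invreal=1 the last axis is restored to the
# 2*(m-1) output length of a real inverse transform.
if __name__ == "__main__":
    import numpy as np
    demo = np.empty((4, 6))
    assert _cook_nd_args(demo) == ([4, 6], [-2, -1])
    assert _cook_nd_args(demo, s=(8, 8)) == ([8, 8], [-2, -1])
    assert _cook_nd_args(demo, invreal=1) == ([4, 10], [-2, -1])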
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
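# Standalone sketch (not part of numpy): _raw_fftnd is nothing more than the
# 1-D transform applied once per requested axis, so an n-D DFT factors into
# repeated 1-D DFTs.  Independent check through the public API:
if __name__ == "__main__":
    import numpy as np
    demo = np.random.randn(4, 5)
    per_axis = np.fft.fft(np.fft.fft(demo, axis=1), axis=0)  # one axis at a time
    assert np.allclose(np.fft.fftn(demo), per_axis)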
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
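# Standalone sketch (not part of numpy): for real input, rfftn equals fftn
# with the redundant negative frequencies of the last transformed axis
# dropped, i.e. truncated to n//2 + 1 entries on that axis.
if __name__ == "__main__":
    import numpy as np
    demo = np.random.randn(3, 8)
    assert np.allclose(np.fft.rfftn(demo), np.fft.fftn(demo)[:, :8 // 2 + 1])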
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
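# Standalone sketch (not part of numpy): the round trip promised in the
# docstring above, including the odd-length pitfall -- the original shape
# must be passed back via `s`, since the default of 2*(m-1) can never
# reproduce an odd last axis.
if __name__ == "__main__":
    import numpy as np
    demo = np.random.randn(4, 5)                     # odd last axis
    assert np.allclose(np.fft.irfftn(np.fft.rfftn(demo), demo.shape), demo)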
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py | 10 | 31920 | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.common import HISTOGRAM_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_BINNED_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import MonotonicConstraint
from sklearn.ensemble._hist_gradient_boosting.splitting import (
Splitter,
compute_node_value
)
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
from sklearn.utils._testing import skip_if_32bit
@pytest.mark.parametrize('n_bins', [3, 32, 256])
def test_histogram_split(n_bins):
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = np.asfortranarray(
rng.randint(0, n_bins - 1, size=(int(1e4), 1)), dtype=X_BINNED_DTYPE)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32)
ordered_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE)
all_hessians = ordered_hessians
sum_hessians = all_hessians.sum()
hessians_are_constant = False
for true_bin in range(1, n_bins - 2):
for sign in [-1, 1]:
ordered_gradients = np.full_like(binned_feature, sign,
dtype=G_H_DTYPE)
ordered_gradients[binned_feature <= true_bin] *= -1
all_gradients = ordered_gradients
sum_gradients = all_gradients.sum()
builder = HistogramBuilder(X_binned,
n_bins,
all_gradients,
all_hessians,
hessians_are_constant)
n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1],
dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1],
dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned,
n_bins_non_missing,
missing_values_bin_idx,
has_missing_values,
is_categorical,
monotonic_cst,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(
sample_indices.shape[0], histograms, sum_gradients,
sum_hessians, value)
assert split_info.bin_idx == true_bin
assert split_info.gain >= 0
assert split_info.feature_idx == feature_idx
assert (split_info.n_samples_left + split_info.n_samples_right
== sample_indices.shape[0])
# Constant hessian: 1. per sample.
assert split_info.n_samples_left == split_info.sum_hessian_left
@skip_if_32bit
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_gradient_and_hessian_sanity(constant_hessian):
# This test checks that the values of gradients and hessians are
# consistent in different places:
# - in split_info: si.sum_gradient_left + si.sum_gradient_right must be
# equal to the gradient at the node. Same for hessians.
# - in the histograms: summing 'sum_gradients' over the bins must be
# constant across all features, and those sums must be equal to the
# node's gradient. Same for hessians.
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=X_BINNED_DTYPE)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
sum_gradients = all_gradients.sum()
if constant_hessian:
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_hessians = 1 * n_samples
else:
all_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
sum_hessians = all_hessians.sum()
builder = HistogramBuilder(X_binned, n_bins, all_gradients,
all_hessians, constant_hessian)
n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1],
dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx,
has_missing_values, is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split, constant_hessian)
hists_parent = builder.compute_histograms_brute(sample_indices)
value_parent = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
si_parent = splitter.find_node_split(n_samples, hists_parent,
sum_gradients, sum_hessians,
value_parent)
sample_indices_left, sample_indices_right, _ = splitter.split_indices(
si_parent, sample_indices)
hists_left = builder.compute_histograms_brute(sample_indices_left)
value_left = compute_node_value(si_parent.sum_gradient_left,
si_parent.sum_hessian_left,
-np.inf, np.inf, l2_regularization)
hists_right = builder.compute_histograms_brute(sample_indices_right)
value_right = compute_node_value(si_parent.sum_gradient_right,
si_parent.sum_hessian_right,
-np.inf, np.inf, l2_regularization)
si_left = splitter.find_node_split(n_samples, hists_left,
si_parent.sum_gradient_left,
si_parent.sum_hessian_left,
value_left)
si_right = splitter.find_node_split(n_samples, hists_right,
si_parent.sum_gradient_right,
si_parent.sum_hessian_right,
value_right)
# make sure that si.sum_gradient_left + si.sum_gradient_right have their
# expected value, same for hessians
for si, indices in (
(si_parent, sample_indices),
(si_left, sample_indices_left),
(si_right, sample_indices_right)):
gradient = si.sum_gradient_right + si.sum_gradient_left
expected_gradient = all_gradients[indices].sum()
hessian = si.sum_hessian_right + si.sum_hessian_left
if constant_hessian:
expected_hessian = indices.shape[0] * all_hessians[0]
else:
expected_hessian = all_hessians[indices].sum()
assert np.isclose(gradient, expected_gradient)
assert np.isclose(hessian, expected_hessian)
# make sure sum of gradients in histograms are the same for all features,
# and make sure they're equal to their expected value
hists_parent = np.asarray(hists_parent, dtype=HISTOGRAM_DTYPE)
hists_left = np.asarray(hists_left, dtype=HISTOGRAM_DTYPE)
hists_right = np.asarray(hists_right, dtype=HISTOGRAM_DTYPE)
for hists, indices in (
(hists_parent, sample_indices),
(hists_left, sample_indices_left),
(hists_right, sample_indices_right)):
# note: gradients and hessians have shape (n_features,),
# we're comparing them to *scalars*. This has the benefit of also
# making sure that all the entries are equal across features.
gradients = hists['sum_gradients'].sum(axis=1) # shape = (n_features,)
expected_gradient = all_gradients[indices].sum() # scalar
hessians = hists['sum_hessians'].sum(axis=1)
if constant_hessian:
# 0 is not the actual hessian, but it's not computed in this case
expected_hessian = 0.
else:
expected_hessian = all_hessians[indices].sum()
assert np.allclose(gradients, expected_gradient)
assert np.allclose(hessians, expected_hessian)
def test_split_indices():
# Check that split_indices returns the correct splits and that
# splitter.partition is consistent with what is returned.
rng = np.random.RandomState(421)
n_bins = 5
n_samples = 10
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
# split will happen on feature 1 and on bin 3
X_binned = [[0, 0],
[0, 3],
[0, 4],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 4],
[0, 0],
[0, 4]]
X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_gradients = all_gradients.sum()
sum_hessians = 1 * n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins,
all_gradients, all_hessians,
hessians_are_constant)
n_bins_non_missing = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx,
has_missing_values, is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
assert np.all(sample_indices == splitter.partition)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
si_root = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value)
# sanity checks for best split
assert si_root.feature_idx == 1
assert si_root.bin_idx == 3
samples_left, samples_right, position_right = splitter.split_indices(
si_root, splitter.partition)
assert set(samples_left) == set([0, 1, 3, 4, 5, 6, 8])
assert set(samples_right) == set([2, 7, 9])
assert list(samples_left) == list(splitter.partition[:position_right])
assert list(samples_right) == list(splitter.partition[position_right:])
# Check that the resulting split indices sizes are consistent with the
# count statistics anticipated when looking for the best split.
assert samples_left.shape[0] == si_root.n_samples_left
assert samples_right.shape[0] == si_root.n_samples_right
def test_min_gain_to_split():
# Try to split a pure node (all gradients are equal, same for hessians)
# with min_gain_to_split = 0 and make sure that the node is not split (best
# possible gain = -1). Note: before the strict inequality comparison, this
# test would fail because the node would be split with a gain of 0.
rng = np.random.RandomState(42)
l2_regularization = 0
min_hessian_to_split = 0
min_samples_leaf = 1
min_gain_to_split = 0.
n_bins = 255
n_samples = 100
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(n_samples, 1)), dtype=X_BINNED_DTYPE)
binned_feature = X_binned[:, 0]
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE)
all_gradients = np.ones_like(binned_feature, dtype=G_H_DTYPE)
sum_gradients = all_gradients.sum()
sum_hessians = all_hessians.sum()
hessians_are_constant = False
builder = HistogramBuilder(X_binned, n_bins, all_gradients,
all_hessians, hessians_are_constant)
n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1],
dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx,
has_missing_values, is_categorical, monotonic_cst,
l2_regularization,
min_hessian_to_split, min_samples_leaf,
min_gain_to_split, hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value)
assert split_info.gain == -1
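# Standalone sketch (not part of the upstream test suite): the split-gain
# identity the assertion above relies on.  For a pure node every left/right
# partition has the same gradient/hessian ratio, so the classic gain
# expression
#   G_l**2/(H_l + reg) + G_r**2/(H_r + reg) - (G_l+G_r)**2/(H_l+H_r + reg)
# is exactly zero, and the splitter's strict inequality leaves the node
# unsplit (the reported gain stays at the -1 sentinel).
def _demo_split_gain(g_l, h_l, g_r, h_r, reg=0.0):
    def leaf(g, h):
        return g ** 2 / (h + reg)
    return leaf(g_l, h_l) + leaf(g_r, h_r) - leaf(g_l + g_r, h_l + h_r)
def test_demo_pure_node_gain_is_zero():
    # pure node as in test_min_gain_to_split: every gradient and hessian is 1
    for n_left in range(1, 100):
        assert np.isclose(
            _demo_split_gain(n_left, n_left, 100 - n_left, 100 - n_left), 0)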
@pytest.mark.parametrize(
'X_binned, all_gradients, has_missing_values, n_bins_non_missing, '
' expected_split_on_nan, expected_bin_idx, expected_go_to_left', [
# basic sanity check with no missing values: given the gradient
# values, the split must occur on bin_idx=3
([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # X_binned
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5], # gradients
False, # no missing values
10, # n_bins_non_missing
False, # don't split on nans
3, # expected_bin_idx
'not_applicable'),
# We replace 2 samples by NaNs (bin_idx=8)
# These 2 samples were mapped to the left node before, so they should
# be mapped to left node again
# Notice how the bin_idx threshold changes from 3 to 1.
([8, 0, 1, 8, 2, 3, 4, 5, 6, 7], # 8 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
8, # n_bins_non_missing
False, # don't split on nans
1, # cut on bin_idx=1
True), # missing values go to left
# same as above, but with non-consecutive missing_values_bin
([9, 0, 1, 9, 2, 3, 4, 5, 6, 7], # 9 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
8, # n_bins_non_missing
False, # don't split on nans
1, # cut on bin_idx=1
True), # missing values go to left
# this time replacing 2 samples that were on the right.
([0, 1, 2, 3, 8, 4, 8, 5, 6, 7], # 8 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
8, # n_bins_non_missing
False, # don't split on nans
3, # cut on bin_idx=3 (like in first case)
False), # missing values go to right
# same as above, but with non-consecutive missing_values_bin
([0, 1, 2, 3, 9, 4, 9, 5, 6, 7], # 9 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
8, # n_bins_non_missing
False, # don't split on nans
3, # cut on bin_idx=3 (like in first case)
False), # missing values go to right
# For the following cases, split_on_nans is True (we replace all of
# the samples with nans, instead of just 2).
([0, 1, 2, 3, 4, 4, 4, 4, 4, 4], # 4 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
4, # n_bins_non_missing
True, # split on nans
3, # cut on bin_idx=3
False), # missing values go to right
# same as above, but with non-consecutive missing_values_bin
([0, 1, 2, 3, 9, 9, 9, 9, 9, 9], # 9 <=> missing
[1, 1, 1, 1, 1, 1, 5, 5, 5, 5],
True, # missing values
4, # n_bins_non_missing
True, # split on nans
3, # cut on bin_idx=3
False), # missing values go to right
([6, 6, 6, 6, 0, 1, 2, 3, 4, 5], # 6 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
6, # n_bins_non_missing
True, # split on nans
5, # cut on bin_idx=5
False), # missing values go to right
# same as above, but with non-consecutive missing_values_bin
([9, 9, 9, 9, 0, 1, 2, 3, 4, 5], # 9 <=> missing
[1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
True, # missing values
6, # n_bins_non_missing
True, # split on nans
5, # cut on bin_idx=5
False), # missing values go to right
]
)
def test_splitting_missing_values(X_binned, all_gradients,
has_missing_values, n_bins_non_missing,
expected_split_on_nan, expected_bin_idx,
expected_go_to_left):
# Make sure missing values are properly supported.
# we build an artificial example with gradients such that the best split
# is on bin_idx=3, when there are no missing values.
# Then we introduce missing values and:
# - make sure the chosen bin is correct (find_best_bin()): it's
# still the same split, even though the index of the bin may change
# - make sure the missing values are mapped to the correct child
# (split_indices())
n_bins = max(X_binned) + 1
n_samples = len(X_binned)
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
sample_indices = np.arange(n_samples, dtype=np.uint32)
X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
X_binned = np.asfortranarray(X_binned)
all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
has_missing_values = np.array([has_missing_values], dtype=np.uint8)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_gradients = all_gradients.sum()
sum_hessians = 1 * n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins,
all_gradients, all_hessians,
hessians_are_constant)
n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned, n_bins_non_missing,
missing_values_bin_idx, has_missing_values,
is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value)
assert split_info.bin_idx == expected_bin_idx
if has_missing_values:
assert split_info.missing_go_to_left == expected_go_to_left
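        # Splitting on the last non-missing bin sends every non-missing sample
        # to the left child, so such a split effectively separates samples on
        # nan-ness.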
split_on_nan = split_info.bin_idx == n_bins_non_missing[0] - 1
assert split_on_nan == expected_split_on_nan
# Make sure the split is properly computed.
    # This also makes sure missing values are properly assigned to the correct
# child in split_indices()
samples_left, samples_right, _ = splitter.split_indices(
split_info, splitter.partition)
if not expected_split_on_nan:
# When we don't split on nans, the split should always be the same.
assert set(samples_left) == set([0, 1, 2, 3])
assert set(samples_right) == set([4, 5, 6, 7, 8, 9])
else:
# When we split on nans, samples with missing values are always mapped
# to the right child.
missing_samples_indices = np.flatnonzero(
np.array(X_binned) == missing_values_bin_idx)
non_missing_samples_indices = np.flatnonzero(
np.array(X_binned) != missing_values_bin_idx)
assert set(samples_right) == set(missing_samples_indices)
assert set(samples_left) == set(non_missing_samples_indices)
@pytest.mark.parametrize(
    'X_binned, has_missing_values, n_bins_non_missing', [
# one category
([0] * 20, False, 1),
        # all categories appear fewer than MIN_CAT_SUPPORT (hardcoded to 10) times
([0] * 9 + [1] * 8, False, 2),
        # only one category appears more than MIN_CAT_SUPPORT times
([0] * 12 + [1] * 8, False, 2),
        # missing values + a category that appears fewer than MIN_CAT_SUPPORT times
# 9 is missing
([0] * 9 + [1] * 8 + [9] * 4, True, 2),
# no non-missing category
([9] * 11, True, 0),
])
def test_splitting_categorical_cat_smooth(X_binned, has_missing_values,
n_bins_non_missing):
    # Check that no split is found when the MIN_CAT_SUPPORT constraint isn't
    # respected.
n_bins = max(X_binned) + 1
n_samples = len(X_binned)
X_binned = np.array([X_binned], dtype=X_BINNED_DTYPE).T
X_binned = np.asfortranarray(X_binned)
l2_regularization = 0.0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.0
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = np.ones(n_samples, dtype=G_H_DTYPE)
has_missing_values = np.array([has_missing_values], dtype=np.uint8)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_gradients = all_gradients.sum()
sum_hessians = n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins, all_gradients,
all_hessians, hessians_are_constant)
n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
monotonic_cst = np.array([MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
splitter = Splitter(X_binned, n_bins_non_missing,
missing_values_bin_idx, has_missing_values,
is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value)
# no split found
assert split_info.gain == -1
def _assert_categories_equals_bitset(categories, bitset):
# assert that the bitset exactly corresponds to the categories
# bitset is assumed to be an array of 8 uint32 elements
# form bitset from threshold
expected_bitset = np.zeros(8, dtype=np.uint32)
for cat in categories:
idx = cat // 32
shift = cat % 32
expected_bitset[idx] |= 1 << shift
# check for equality
assert_array_equal(expected_bitset, bitset)
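# For illustration (not part of any test): category 33 maps to word
# 33 // 32 == 1 and bit 33 % 32 == 1, so a bitset containing only category 33
# is [0, 2, 0, 0, 0, 0, 0, 0].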
@pytest.mark.parametrize(
"X_binned, all_gradients, expected_categories_left, n_bins_non_missing,"
"missing_values_bin_idx, has_missing_values, expected_missing_go_to_left",
[
# 4 categories
([0, 1, 2, 3] * 11, # X_binned
[10, 1, 10, 10] * 11, # all_gradients
[1], # expected_categories_left
4, # n_bins_non_missing
4, # missing_values_bin_idx
False, # has_missing_values
None), # expected_missing_go_to_left, unchecked
# Make sure that the categories that are on the right (second half) of
# the sorted categories array can still go in the left child. In this
# case, the best split was found when scanning from right to left.
([0, 1, 2, 3] * 11, # X_binned
[10, 10, 10, 1] * 11, # all_gradients
[3], # expected_categories_left
4, # n_bins_non_missing
4, # missing_values_bin_idx
False, # has_missing_values
None), # expected_missing_go_to_left, unchecked
# categories that don't respect MIN_CAT_SUPPORT (cat 4) are always
# mapped to the right child
([0, 1, 2, 3] * 11 + [4] * 5, # X_binned
[10, 10, 10, 1] * 11 + [10] * 5, # all_gradients
[3], # expected_categories_left
4, # n_bins_non_missing
4, # missing_values_bin_idx
False, # has_missing_values
None), # expected_missing_go_to_left, unchecked
# categories that don't respect MIN_CAT_SUPPORT are always mapped to
# the right child: in this case a more sensible split could have been
# 3, 4 - 0, 1, 2
# But the split is still 3 - 0, 1, 2, 4. this is because we only scan
# up to the middle of the sorted category array (0, 1, 2, 3), and
# because we exclude cat 4 in this array.
([0, 1, 2, 3] * 11 + [4] * 5, # X_binned
[10, 10, 10, 1] * 11 + [1] * 5, # all_gradients
[3], # expected_categories_left
4, # n_bins_non_missing
4, # missing_values_bin_idx
False, # has_missing_values
None), # expected_missing_go_to_left, unchecked
# 4 categories with missing values that go to the right
([0, 1, 2] * 11 + [9] * 11, # X_binned
[10, 1, 10] * 11 + [10] * 11, # all_gradients
[1], # expected_categories_left
3, # n_bins_non_missing
9, # missing_values_bin_idx
True, # has_missing_values
False), # expected_missing_go_to_left
        # 3 categories, plus missing values that go to the left
([0, 1, 2] * 11 + [9] * 11, # X_binned
[10, 1, 10] * 11 + [1] * 11, # all_gradients
[1, 9], # expected_categories_left
3, # n_bins_non_missing
9, # missing_values_bin_idx
True, # has_missing_values
True), # expected_missing_go_to_left
# split is on the missing value
([0, 1, 2, 3, 4] * 11 + [255] * 12, # X_binned
[10, 10, 10, 10, 10] * 11 + [1] * 12, # all_gradients
[255], # expected_categories_left
5, # n_bins_non_missing
255, # missing_values_bin_idx
True, # has_missing_values
True), # expected_missing_go_to_left
# split on even categories
(list(range(60)) * 12, # X_binned
[10, 1] * 360, # all_gradients
list(range(1, 60, 2)), # expected_categories_left
59, # n_bins_non_missing
59, # missing_values_bin_idx
True, # has_missing_values
True), # expected_missing_go_to_left
# split on every 8 categories
(list(range(256)) * 12, # X_binned
[10, 10, 10, 10, 10, 10, 10, 1] * 384, # all_gradients
list(range(7, 256, 8)), # expected_categories_left
255, # n_bins_non_missing
255, # missing_values_bin_idx
True, # has_missing_values
True), # expected_missing_go_to_left
])
def test_splitting_categorical_sanity(X_binned, all_gradients,
expected_categories_left,
n_bins_non_missing,
missing_values_bin_idx,
has_missing_values,
expected_missing_go_to_left):
# Tests various combinations of categorical splits
n_samples = len(X_binned)
n_bins = max(X_binned) + 1
X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
X_binned = np.asfortranarray(X_binned)
l2_regularization = 0.0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
has_missing_values = np.array([has_missing_values], dtype=np.uint8)
sum_gradients = all_gradients.sum()
sum_hessians = n_samples
hessians_are_constant = True
builder = HistogramBuilder(X_binned, n_bins, all_gradients,
all_hessians, hessians_are_constant)
n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
monotonic_cst = np.array([MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
splitter = Splitter(X_binned, n_bins_non_missing,
missing_values_bin_idx, has_missing_values,
is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(sum_gradients, sum_hessians,
-np.inf, np.inf, l2_regularization)
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value)
assert split_info.is_categorical
assert split_info.gain > 0
_assert_categories_equals_bitset(expected_categories_left,
split_info.left_cat_bitset)
if has_missing_values:
assert split_info.missing_go_to_left == expected_missing_go_to_left
# If there is no missing value during training, the flag missing_go_to_left
# is set later in the grower.
# make sure samples are split correctly
samples_left, samples_right, _ = splitter.split_indices(
split_info, splitter.partition)
left_mask = np.isin(X_binned.ravel(), expected_categories_left)
assert_array_equal(sample_indices[left_mask], samples_left)
assert_array_equal(sample_indices[~left_mask], samples_right)
| bsd-3-clause |
jseabold/statsmodels | statsmodels/tsa/statespace/tests/test_save.py | 3 | 4402 | """
Tests of save / load / remove_data state space functionality.
"""
import pickle
import os
import tempfile
import pytest
from statsmodels import datasets
from statsmodels.tsa.statespace import (sarimax, structural, varmax,
dynamic_factor)
from numpy.testing import assert_allclose
current_path = os.path.dirname(os.path.abspath(__file__))
macrodata = datasets.macrodata.load_pandas().data
@pytest.fixture()
def temp_filename():
fd, filename = tempfile.mkstemp()
yield filename
try:
os.close(fd)
os.unlink(filename)
except Exception:
print("Couldn't close or delete file "
"{filename}.".format(filename=filename))
def test_sarimax(temp_filename):
mod = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
res = mod.smooth(mod.start_params)
res.summary()
res.save(temp_filename)
res2 = sarimax.SARIMAXResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_sarimax_pickle():
mod = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
pkl_mod = pickle.loads(pickle.dumps(mod))
res = mod.smooth(mod.start_params)
pkl_res = pkl_mod.smooth(mod.start_params)
assert_allclose(res.params, pkl_res.params)
assert_allclose(res.bse, pkl_res.bse)
assert_allclose(res.llf, pkl_res.llf)
def test_structural(temp_filename):
mod = structural.UnobservedComponents(
macrodata['realgdp'].values, 'llevel')
res = mod.smooth(mod.start_params)
res.summary()
res.save(temp_filename)
res2 = structural.UnobservedComponentsResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_structural_pickle():
mod = structural.UnobservedComponents(
macrodata['realgdp'].values, 'llevel')
pkl_mod = pickle.loads(pickle.dumps(mod))
res = mod.smooth(mod.start_params)
pkl_res = pkl_mod.smooth(pkl_mod.start_params)
assert_allclose(res.params, pkl_res.params)
assert_allclose(res.bse, pkl_res.bse)
assert_allclose(res.llf, pkl_res.llf)
def test_dynamic_factor(temp_filename):
mod = dynamic_factor.DynamicFactor(
macrodata[['realgdp', 'realcons']].diff().iloc[1:].values, k_factors=1,
factor_order=1)
res = mod.smooth(mod.start_params)
res.summary()
res.save(temp_filename)
res2 = dynamic_factor.DynamicFactorResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_dynamic_factor_pickle(temp_filename):
mod = varmax.VARMAX(
macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
order=(1, 0))
pkl_mod = pickle.loads(pickle.dumps(mod))
res = mod.smooth(mod.start_params)
pkl_res = pkl_mod.smooth(mod.start_params)
assert_allclose(res.params, pkl_res.params)
assert_allclose(res.bse, pkl_res.bse)
assert_allclose(res.llf, pkl_res.llf)
res.summary()
res.save(temp_filename)
res2 = varmax.VARMAXResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_varmax(temp_filename):
mod = varmax.VARMAX(
macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
order=(1, 0))
res = mod.smooth(mod.start_params)
res.summary()
res.save(temp_filename)
res2 = varmax.VARMAXResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_varmax_pickle(temp_filename):
mod = varmax.VARMAX(
macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
order=(1, 0))
res = mod.smooth(mod.start_params)
res.summary()
res.save(temp_filename)
res2 = varmax.VARMAXResults.load(temp_filename)
assert_allclose(res.params, res2.params)
assert_allclose(res.bse, res2.bse)
assert_allclose(res.llf, res2.llf)
def test_existing_pickle():
pkl_file = os.path.join(current_path, 'results', 'sm-0.9-sarimax.pkl')
loaded = sarimax.SARIMAXResults.load(pkl_file)
assert isinstance(loaded, sarimax.SARIMAXResultsWrapper)
| bsd-3-clause |
abhishekgahlot/scikit-learn | sklearn/neighbors/nearest_centroid.py | 10 | 7258 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..externals.six.moves import xrange
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Parameters
----------
    metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in CSC
        # format, since that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
        for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
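            # For illustration: with shrink_threshold=0.5, a deviation of +1.2
            # shrinks to +0.7, while a deviation of -0.3 crosses zero and
            # becomes 0.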
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, "centroids_"):
raise AttributeError("Model has not been trained yet.")
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
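    # After this flip, ``weights`` below equals the flipped scaling times a
    # Bernoulli draw; the estimator functions then multiply X by (1 - weights),
    # so each feature column is rescaled by either 1 or the user-supplied
    # ``scaling``.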
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
        return X[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
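    # Note: transform followed by inverse_transform zero-fills the removed
    # features, so the round trip is lossy by design.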
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling``.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
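    >>> # Illustrative only -- fitting on toy data (shapes are arbitrary):
    >>> # randomized_lasso.fit(np.random.randn(20, 5), np.random.randn(20))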
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
    if all_alphas[-1] != 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
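# Example usage (a sketch; variable names are illustrative):
#     alphas_grid, scores_path = lasso_stability_path(X, y, n_resampling=100)
#     stable_features = scores_path.max(axis=1) > 0.75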
| bsd-3-clause |
benoitsteiner/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 44 | 19373 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments,
np.add.reduce(offsets * offsets, 1))
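# For reference: the second and third returned values are the index of the true
# center each point was drawn from and the squared norm of each point's offset;
# the tests use the summed offsets as the reference clustering score.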
class KMeansTestBase(test.TestCase):
def input_fn(self, batch_size=None, points=None, randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
          minval=0, maxval=num_points,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(self.num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies([q.enqueue_many(
math_ops.range(self.num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0,
_init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(self.batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
      # We don't test the use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(clusters,
num_points)
# Test predict
assignments = list(kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
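    # Expected distances follow from expanding the squared Euclidean distance,
    # ||x - c||^2 = ||x||^2 - 2 * x.c + ||c||^2, clipped at zero to guard
    # against small negative values caused by rounding.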
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) - 2 * np.dot(
points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(
normalize(self.points)[0:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(
normalize(self.points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(self.kmeans.predict_cluster_idx(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(input_fn=self.input_fn(
batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(
normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(
normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(
normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (
input_lib.limit_epochs(constant_op.constant(points), num_epochs=1),
None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependendent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
naturali/tensorflow | tensorflow/examples/skflow/resnet.py | 12 | 5640 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example builds deep residual network for mnist data.
Reference Paper: http://arxiv.org/pdf/1512.03385.pdf
Note that this is still a work-in-progress. Feel free to submit a PR
to make this better.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from math import sqrt
import os
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.examples.tutorials.mnist import input_data
def res_net(x, y, activation=tf.nn.relu):
"""Builds a residual network.
Note that if the input tensor is 2D, it must be square in order to be
converted to a 4D tensor.
Borrowed structure from:
github.com/pkmital/tensorflow_tutorials/blob/master/10_residual_network.py
Args:
x: Input of the network
y: Output of the network
activation: Activation function to apply after each convolution
Returns:
Predictions and loss tensors.
"""
# Configurations for each bottleneck group.
BottleneckGroup = namedtuple(
'BottleneckGroup', ['num_blocks', 'num_filters', 'bottleneck_size'])
groups = [BottleneckGroup(3, 128, 32),
BottleneckGroup(3, 256, 64),
BottleneckGroup(3, 512, 128),
BottleneckGroup(3, 1024, 256)]
input_shape = x.get_shape().as_list()
# Reshape the input into the right shape if it's 2D tensor
if len(input_shape) == 2:
ndim = int(sqrt(input_shape[1]))
x = tf.reshape(x, [-1, ndim, ndim, 1])
# First convolution expands to 64 channels
with tf.variable_scope('conv_layer1'):
net = learn.ops.conv2d(x, 64, [7, 7], batch_norm=True,
activation=activation, bias=False)
# Max pool
net = tf.nn.max_pool(
net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# First chain of resnets
with tf.variable_scope('conv_layer2'):
net = learn.ops.conv2d(net, groups[0].num_filters,
[1, 1], [1, 1, 1, 1],
padding='VALID', bias=True)
# Create the bottleneck groups, each of which contains `num_blocks`
# bottleneck groups.
for group_i, group in enumerate(groups):
for block_i in range(group.num_blocks):
name = 'group_%d/block_%d' % (group_i, block_i)
# 1x1 convolution responsible for reducing dimension
with tf.variable_scope(name + '/conv_in'):
conv = learn.ops.conv2d(net, group.bottleneck_size,
[1, 1], [1, 1, 1, 1],
padding='VALID',
activation=activation,
batch_norm=True,
bias=False)
with tf.variable_scope(name + '/conv_bottleneck'):
conv = learn.ops.conv2d(conv, group.bottleneck_size,
[3, 3], [1, 1, 1, 1],
padding='SAME',
activation=activation,
batch_norm=True,
bias=False)
# 1x1 convolution responsible for restoring dimension
with tf.variable_scope(name + '/conv_out'):
input_dim = net.get_shape()[-1].value
conv = learn.ops.conv2d(conv, input_dim,
[1, 1], [1, 1, 1, 1],
padding='VALID',
activation=activation,
batch_norm=True,
bias=False)
# shortcut connections that turn the network into its counterpart
# residual function (identity shortcut)
net = conv + net
try:
# upscale to the next group size
next_group = groups[group_i + 1]
with tf.variable_scope('block_%d/conv_upscale' % group_i):
net = learn.ops.conv2d(net, next_group.num_filters,
[1, 1], [1, 1, 1, 1],
bias=False,
padding='SAME')
except IndexError:
pass
net_shape = net.get_shape().as_list()
net = tf.nn.avg_pool(net,
ksize=[1, net_shape[1], net_shape[2], 1],
strides=[1, 1, 1, 1], padding='VALID')
net_shape = net.get_shape().as_list()
net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])
return learn.models.logistic_regression(net, y)
# Download and load MNIST data.
mnist = input_data.read_data_sets('MNIST_data')
# Restore model if graph is saved into a folder.
if os.path.exists('models/resnet/graph.pbtxt'):
classifier = learn.TensorFlowEstimator.restore('models/resnet/')
while True:
# Train model and save summaries into logdir.
classifier.fit(
mnist.train.images, mnist.train.labels, logdir='models/resnet/')
# Calculate accuracy.
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images, batch_size=64))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
HealthCatalystSLC/healthcareai-py | healthcareai/tests/test_predict.py | 4 | 1135 | import unittest
from sklearn.linear_model import LinearRegression
from healthcareai.common.healthcareai_error import HealthcareAIError
from healthcareai.common.predict import validate_estimator
class TestPredictValidation(unittest.TestCase):
def test_predict_validation_should_raise_error_on_non_estimator(self):
self.assertRaises(HealthcareAIError, validate_estimator, 'foo')
def test_predict_validation_error_message_on_non_estimator(self):
non_estimator_junk_data = 'foo'
try:
validate_estimator(non_estimator_junk_data)
# Fail the test if no error is raised
self.fail()
except HealthcareAIError as e:
expected_message = 'Predictions require an estimator. You passed in foo, which is of type: {}'.format(
type(non_estimator_junk_data))
self.assertEqual(expected_message, e.message)
def test_predict_validation_should_be_true_with_instance_of_scikit_estimator(self):
estimator = LinearRegression()
self.assertTrue(validate_estimator(estimator))
if __name__ == '__main__':
unittest.main()
| mit |
csutorasr/BMEVIAUAL00 | model/alg.py | 1 | 7812 | import numpy as np
import math
import util
import xmlh
import sys
import os
from os.path import isfile, join
from keras.models import load_model
from sklearn import preprocessing
class Algorithm:
"""
Collection of algorithms to calculate the handedness of the writer of the given text.
For basic usage import ...
"""
def __init__(self, file_name):
self.model = load_model('right_left.h5')
self.strokes = []
self.h_line_indexes = []
self.length = None
self.file_name = file_name
self.right_strokes = []
self.load_data()
def load_data(self):
"""
Loads the data from the xml, and calculates the horizontal lines.
:return:
"""
try:
self.strokes = xmlh.build_structure(self.file_name)
except IOError as e:
print('I/O error({0}): {1}.'.format(e.errno, e.strerror))
self.h_line_indexes = self.predict(self.standardize_input(self.create_stroke_statistics()))
def predict(self, stroke_params):
"""
Predicts the horizontal lines, using the loaded model.
:param stroke_params: Standardized n by 4 array of stroke parameters.
:return: Indexes of the horizontal lines.
"""
stroke_params = np.array(stroke_params)
stroke_params = stroke_params.reshape(stroke_params.shape[0], stroke_params.shape[1])
h_lines = []
output = []
for params in stroke_params:
output.append(self.model.predict(np.array(params).reshape(-1, 4)))
for index, stroke in enumerate(self.strokes):
if output[index] > 0.2 and self.length[index] > 0.15:
h_lines.append(index)
return h_lines
def get_horizontal_lines(self):
return self.h_line_indexes
@staticmethod
def get_stroke_parameters(stroke):
"""
Calculates the parameters of a stroke and concatenates it
with the predetermined horizontal value.
:param stroke: A single stroke of the text.
:return: The calculated parameters of the stroke. The placeholder values
(-1, -1, -1, 0) are returned if the stroke is too short.
"""
h_line_avg_distance = 0
d_line_avg_distance = 0
stroke_length = 0
avg_degree = 0
# In case of commas, dots, or xml errors the stroke is marked with placeholder values, and will be removed
# from the training data in the next processing step.
if len(stroke) <= 2:
return -1, -1, -1, 0
else:
for index in range(len(stroke)):
try:
if index in range(0, len(stroke) - 1):
stroke_length += util.point_2_point(stroke[index], stroke[index + 1])
avg_degree += math.fabs(util.calculate_angle(stroke[0],
stroke[index + 1])) / (len(stroke) - 1)
if index in range(1, len(stroke) - 1):
# Average distance of the stroke's points from the line,
# that connects the first and the final point.
d_line_avg_distance += util.point_2_line(stroke[0], stroke[-1],
stroke[index]) / (len(stroke) - 2)
if index in range(1, len(stroke)):
# Average distance of the stroke's points from the horizontal line,
# that goes through the first point.
h_line_avg_distance += \
util.point_2_line(stroke[0], util.Point(stroke[0].x + 1, stroke[0].y),
stroke[index]) / (len(stroke) - 1)
except ZeroDivisionError:
# In case of division error, that occurs during the calculation of the angle
# (due to faulty xml data)
# ignore the point and move to the next.
pass
return avg_degree, h_line_avg_distance, d_line_avg_distance, stroke_length
def create_stroke_statistics(self):
"""
Generates the parameter vector for each stroke.
The strokes are stored in numpy array for each file.
:return: An n by 4 array, containing stroke parameters per column.
"""
length_sum = 0
stroke_set = []
for stroke_index, stroke in enumerate(self.strokes):
params = self.get_stroke_parameters(stroke)
length_sum += params[3]
stroke_set.append(params)
return np.array([(deg, h_dist, d_dist, length / (length_sum / len(self.strokes))) for
(deg, h_dist, d_dist, length) in stroke_set])
def standardize_input(self, stat):
"""
Standardizes the statistics.
:param stat: An n by 4 matrix, that contains the stroke parameters per column.
:return: The standardized input, that will be processed by the model.
"""
# Dividing the parameters into separate lists for scaling.
avg_degree = stat[:, 0].reshape(-1, 1)
h_distance = stat[:, 1].reshape(-1, 1)
d_distance = stat[:, 2].reshape(-1, 1)
length = stat[:, 3].reshape(-1, 1)
self.length = length
# If a parameter is None due to faulty xml or an outlying stroke length, it is necessary to
# define it before scaling. To make sure these won't be classified as horizontal lines,
# the highest values are given.
avg_degree = np.array([np.nanmax(avg_degree) if deg is None else deg for deg in avg_degree])
h_distance = np.array([np.nanmax(h_distance) if dist is None else dist for dist in h_distance])
d_distance = np.array([np.nanmax(d_distance) if dist is None else dist for dist in d_distance])
# Calculating mean average
degree_scale = preprocessing.StandardScaler().fit(avg_degree)
h_distance_scale = preprocessing.StandardScaler().fit(h_distance)
d_distance_scale = preprocessing.StandardScaler().fit(d_distance)
length_scale = preprocessing.StandardScaler().fit(length)
# Transforming data
avg_degree = degree_scale.transform(avg_degree)
h_distance = h_distance_scale.transform(h_distance)
d_distance = d_distance_scale.transform(d_distance)
length = length_scale.transform(length)
temp_array = np.array([avg_degree, h_distance, d_distance, length])
std_input = []
for i in range(temp_array.shape[1]):
std_input.append(temp_array[:, i])
return std_input
def determine_handedness(self):
line_dir = []
for index in self.h_line_indexes:
if self.strokes[int(index)][0].x < self.strokes[int(index)][-1].x:
line_dir.append(False)
self.right_strokes.append(index)
else:
line_dir.append(True)
if line_dir.count(True) > 2:
return "left"
elif len(line_dir) <= 2:
return "unknown"
else:
return "right"
def dump_predictions(root_dir):
for file in os.listdir(root_dir):
if isfile(join(root_dir, file)):
alg = Algorithm(join(root_dir, file))
xmlh.dump_results(join(root_dir, file), calculated_handedness=alg.determine_handedness())
xmlh.mark_horizontal(join(root_dir, file), alg.get_horizontal_lines(), alg.right_strokes)
print(join(root_dir, file) + "-Completed")
else:
dump_predictions(join(root_dir, file))
def main():
dump_predictions(str(sys.argv[1]))
if __name__ == "__main__":
main()
| mit |
daq-tools/kotori | kotori/io/export/util.py | 1 | 3386 | # -*- coding: utf-8 -*-
# (c) 2016-2021 Andreas Motl <[email protected]>
from twisted.logger import Logger, LogLevel
log = Logger()
try:
import pandas
except ImportError:
log.failure('Dataframe functions not available, please install "pandas".', level=LogLevel.warn)
try:
from pandas.tslib import Timedelta
except ImportError:
try:
from pandas import Timedelta
except ImportError:
log.failure('Dataframe functions not available, please install "pandas".', level=LogLevel.warn)
def dataframe_index_to_column(df, column):
"""
Copy DataFrame index column to real data column.
"""
dt = df.index
df[column] = dt
#df.reset_index(drop=True, inplace=True)
return df
def dataframe_wide_to_long_indexed(df, column):
"""
Convert DataFrame from wide to long format using specified column as index column,
followed by indexing the DataFrame on the very same column and finally sorting it.
See also:
- http://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-by-melt
- http://stackoverflow.com/questions/17688155/complicated-for-me-reshaping-from-wide-to-long-in-pandas
"""
df = pandas.melt(df, id_vars=column).dropna()
df = dataframe_index_and_sort(df, column)
return df
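# A hedged illustration (hypothetical data, not part of the original module):
# a wide frame with a 'time' column plus one column per measurement melts
# into long format with 'variable'/'value' columns, indexed and sorted on
# 'time'.
# >>> df = pandas.DataFrame({'time': [1, 2], 'temp': [20.5, 21.0], 'hum': [55, 53]})
# >>> dataframe_wide_to_long_indexed(df, 'time').columns.tolist()
# ['variable', 'value']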
def dataframe_index_and_sort(df, column):
"""
Index and sort DataFrame on specified column.
"""
df = df.set_index([column])
df = df.sort_index()
return df
def matplotlib_locator_formatter(timedelta, span=1):
"""
Compute appropriate locator and formatter for renderers
based on matplotlib, depending on designated time span.
"""
from matplotlib.dates import date_ticker_factory, DateFormatter
locator, formatter = date_ticker_factory(span)
# http://pandas.pydata.org/pandas-docs/stable/timedeltas.html
# https://stackoverflow.com/questions/16103238/pandas-timedelta-in-days
is_macro = timedelta <= Timedelta(days=1)
is_supermacro = timedelta <= Timedelta(minutes=5)
if is_macro:
#formatter = DateFormatter(fmt='%H:%M:%S.%f')
formatter = DateFormatter(fmt='%H:%M')
if is_supermacro:
formatter = DateFormatter(fmt='%H:%M:%S')
# Formatter overrides
#if formatter.fmt == '%H:%M\n%b %d':
# formatter = DateFormatter(fmt='%Y-%m-%d %H:%M')
# Labs
#from matplotlib.dates import AutoDateLocator, AutoDateFormatter, HOURLY
#locator = AutoDateLocator(maxticks=7)
#locator.autoscale()
#locator.intervald[HOURLY] = [5]
#formatter = AutoDateFormatter(breaks)
#formatter = date_format('%Y-%m-%d\n%H:%M')
# Default building blocks
#from matplotlib.dates import AutoDateFormatter, AutoDateLocator
#locator = AutoDateLocator()
#formatter = AutoDateFormatter(locator)
return locator, formatter
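# A minimal usage sketch (the Timedelta below is illustrative): spans of at
# most five minutes get a second-resolution formatter, spans of at most one
# day a minute-resolution one, and longer spans keep the factory default.
# >>> locator, formatter = matplotlib_locator_formatter(Timedelta(minutes=3))
# >>> formatter.fmt
# '%H:%M:%S'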
def make_timezone_unaware(df):
# Please ensure that datetimes are timezone unaware before writing to Excel.
# https://github.com/pandas-dev/pandas/pull/27129
# https://github.com/pandas-dev/pandas/issues/28921
# https://stackoverflow.com/questions/61802080/excelwriter-valueerror-excel-does-not-support-datetime-with-timezone-when-savin
# https://github.com/pandas-dev/pandas/issues/7056
df['time'] = pandas.to_datetime(
df['time'], utc=True) \
.dt.tz_convert('UTC') \
.dt.tz_localize(None)
| agpl-3.0 |
Chilipp/psy-simple | psy_simple/base.py | 1 | 33362 | import six
from abc import abstractmethod
from collections import defaultdict
from itertools import chain
import numpy as np
import inspect
import pandas as pd
import matplotlib.pyplot as plt
from psyplot.docstring import docstrings, safe_modulo, dedent
from psyplot.data import InteractiveList, open_dataset
from psyplot.compat.pycompat import filter
from psyplot.plotter import (
Plotter, Formatoption, rcParams, START)
docstrings.params['replace_note'] = inspect.cleandoc("""
You can insert any meta key from the :attr:`xarray.DataArray.attrs` via a
string like ``'%%(key)s'``. Furthermore there are some special cases:
- Strings like ``'%%Y'``, ``'%%b'``, etc. will be replaced using the
:meth:`datetime.datetime.strftime` method as long as the data has a time
coordinate and this can be converted to a :class:`~datetime.datetime`
object.
- ``'%%(x)s'``, ``'%%(y)s'``, ``'%%(z)s'``, ``'%%(t)s'`` will be replaced
by the value of the x-, y-, z- or time coordinate (as long as this
coordinate is one-dimensional in the data)
- any attribute of one of the above coordinates is inserted via
``axis + key`` (e.g. the name of the x-coordinate can be inserted via
``'%%(xname)s'``).
- Labels defined in the :class:`psyplot.rcParams` ``'texts.labels'`` key
are also replaced when enclosed by '{}'. The standard labels are
- %s""" % '\n - '.join(
'%s: ``%s``' % tuple(item) for item in six.iteritems(
rcParams['texts.labels'])))
docstrings.params['colors'] = inspect.cleandoc("""
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and wonderful ways,
including full names (``'green'``), hex strings (``'#008000'``), RGB or
RGBA tuples (``(0,1,0,1)``) or grayscale intensities as a string
(``'0.8'``).""")
docstrings.params['fontsizes'] = inspect.cleandoc("""
float
The absolute font size in points (e.g., 12)
string
Strings might be 'xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large'.""")
class TextBase(object):
"""Abstract base class for formatoptions that provides a replace method"""
delimiter = None
group = 'labels'
@property
def enhanced_attrs(self):
"""The enhanced attributes of the array"""
arr = self.data
return self.get_enhanced_attrs(arr)
@property
def rc(self):
""":class:`~psyplot.config.rcsetup.SubDict` of rcParams 'texts' key"""
try:
return self._rc
except AttributeError:
return rcParams.find_and_replace(base_str=['texts.'])
data_dependent = True
@docstrings.dedent
def replace(self, s, data, attrs=None):
"""
Replace the attributes of the plotter data in a string
%(replace_note)s
Parameters
----------
s: str
String where the replacements shall be made
data: InteractiveBase
Data object from which to use the coordinates and insert the
coordinate and attribute informations
attrs: dict
Meta attributes that shall be used for replacements. If None, it
will be gained from `data.attrs`
Returns
-------
str
`s` with inserted informations"""
# insert labels
s = s.format(**self.rc['labels'])
# replace attributes
attrs = attrs or data.attrs
if hasattr(getattr(data, 'psy', None), 'arr_name'):
attrs = attrs.copy()
attrs['arr_name'] = data.psy.arr_name
s = safe_modulo(s, attrs)
# replace datetime.datetime like time informations
if isinstance(data, InteractiveList):
data = data[0]
tname = self.any_decoder.get_tname(
next(self.plotter.iter_base_variables), data.coords)
if tname is not None and tname in data.coords:
time = data.coords[tname]
if not time.values.ndim:
try: # assume a valid datetime.datetime instance
s = pd.to_datetime(str(time.values[()])).strftime(s)
except ValueError:
pass
if six.PY2:
return s.decode('utf-8')
return s
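# A hedged example of the replacement semantics (attrs are hypothetical):
# with data.attrs == {'long_name': 'Temperature', 'units': 'K'}, the string
# '%(long_name)s [%(units)s]' becomes 'Temperature [K]', while a label key
# enclosed in braces would first be filled from rcParams['texts.labels'].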
def get_fig_data_attrs(self, delimiter=None):
"""Join the data attributes with other plotters in the project
This method joins the attributes of the
:class:`~psyplot.InteractiveBase` instances in the project that
draw on the same figure as this instance does.
Parameters
----------
delimiter: str
Specifies the delimiter with what the attributes are joined. If
None, the :attr:`delimiter` attribute of this instance or (if the
latter is also None), the rcParams['texts.delimiter'] item is used.
Returns
-------
dict
A dictionary with all the meta attributes joined by the specified
`delimiter`"""
if self.project is not None:
delimiter = next(filter(lambda d: d is not None, [
delimiter, self.delimiter, self.rc['delimiter']]))
figs = self.project.figs
fig = self.ax.get_figure()
if self.plotter._initialized and fig in figs:
ret = figs[fig].joined_attrs(delimiter=delimiter,
plot_data=True)
else:
ret = self.get_enhanced_attrs(self.plotter.plot_data)
self.logger.debug(
'Can not get the figure attributes because plot has not '
'yet been initialized!')
return ret
else:
return self.get_enhanced_attrs(self.plotter.plot_data)
def get_enhanced_attrs(self, *args, **kwargs):
replot = kwargs.pop('replot', False)
if hasattr(self, '_enhanced_attrs') and not (
self.plotter.replot or replot):
return self._enhanced_attrs
self._enhanced_attrs = self.plotter.get_enhanced_attrs(*args, **kwargs)
return self._enhanced_attrs
def get_fmt_widget(self, parent, project):
"""Create a combobox with the attributes"""
from psy_simple.widgets.texts import LabelWidget
return LabelWidget(parent, self, project)
docstrings.params['fontweights'] = inspect.cleandoc("""
float
a float between 0 and 1000
string
Possible strings are one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'.""")
@docstrings.get_sections(base='label_weight')
@dedent
def label_weight(base, label_name=None, children=[], parents=[],
dependencies=[]):
"""
Function that returns a Formatoption class for modifying the fontweight
This function returns a :class:`~psyplot.plotter.Formatoption` instance
that modifies the weight of the given `base` formatoption
Parameters
----------
base: Formatoption
The base formatoption instance that is used in the
:class:`psyplot.Plotter` subclass to create the label. The instance
must have a ``texts`` attribute which stores all the
:class:`matplotlib.text.Text` instances.
label_name: str
The name of the label to use in the documentation. If None,
it will be ``key``, where ``key`` is the
:attr:`psyplot.plotter.Formatoption.key`` attribute of `base`
children: list of str
The childrens of the resulting formatoption class (besides the `base`
formatoption which is included anyway)
parents: list of str
The parents of the resulting formatoption class (besides the `base`
the properties formatoption from `base` (see :func:`label_props`))
dependencies: list of str
The dependencies of the formatoption
Returns
-------
Formatoption
The formatoption instance that modifies the fontweight of `base`
See Also
--------
label_size, label_props, Figtitle, Title"""
label_name = label_name or base.key
cl_children = children
cl_parents = parents
cl_dependencies = dependencies
class LabelWeight(Formatoption):
__doc__ = """
Set the fontweight of the %s
Possible types
--------------
%%(fontweights)s
See Also
--------
%s, %s, %s""" % (label_name, base.key, base.key + 'size',
base.key + 'props')
children = [base.key] + \
cl_children
parent = [base.key + 'props'] + cl_parents
dependencies = cl_dependencies
group = 'labels'
name = 'Font weight of ' + (base.name or base.key)
def update(self, value):
for text in getattr(self, base.key).texts:
text.set_weight(value)
def get_fmt_widget(self, parent, project):
"""Get a widget with the different font weights"""
from psy_simple.widgets.texts import FontWeightWidget
return FontWeightWidget(
parent, self, next(iter(getattr(self, base.key).texts), None),
base)
return LabelWeight(base.key + 'weight')
@docstrings.dedent
def label_size(base, label_name=None, children=[], parents=[],
dependencies=[]):
"""
Function that returns a Formatoption class for modifying the fontsite
This function returns a :class:`~psyplot.plotter.Formatoption` instance
that modifies the size of the given `base` formatoption
Parameters
----------
%(label_weight.parameters)s
Returns
-------
Formatoption
The formatoption instance that modifies the fontsize of `base`
See Also
--------
label_weight, label_props, Figtitle, Title"""
label_name = label_name or base.key
cl_children = children
cl_parents = parents
cl_dependencies = dependencies
class LabelSize(Formatoption):
__doc__ = """
Set the size of the %s
Possible types
--------------
%%(fontsizes)s
See Also
--------
%s, %s, %s""" % (label_name, base.key, base.key + 'weight',
base.key + 'props')
children = [base.key] + cl_children
parent = [base.key + 'props'] + cl_parents
dependencies = cl_dependencies
group = 'labels'
name = 'Font size of ' + (base.name or base.key)
def update(self, value):
for text in getattr(self, base.key).texts:
text.set_size(value)
def get_fmt_widget(self, parent, project):
"""Get a widget with the different font weights"""
from psy_simple.widgets.texts import FontSizeWidget
return FontSizeWidget(
parent, self, next(iter(getattr(self, base.key).texts), None),
base)
return LabelSize(base.key + 'size')
docstrings.keep_params('label_weight.parameters', 'base', 'label_name')
@docstrings.dedent
def label_props(base, label_name=None, children=[], parents=[],
dependencies=[]):
"""
Function that returns a Formatoption class for modifying the fontsite
This function returns a :class:`~psyplot.plotter.Formatoption` instance
that modifies the size of the given `base` formatoption
Parameters
----------
%(label_weight.parameters)s
children: list of str
The childrens of the resulting formatoption class (besides the `base`
formatoption, the ``base.key + 'size'`` and ``base.key + 'weight'``
keys, which are included anyway (see :func:`label_size`,
:func:`label_weight`))
parents: list of str
The parents of the resulting formatoption class
Returns
-------
Formatoption
The formatoption instance that modifies the fontsize of `base`
See Also
--------
label_weight, label_props, Figtitle, Title"""
label_name = label_name or base.key
cl_children = children
cl_parents = parents
cl_dependencies = dependencies
class LabelProps(Formatoption):
__doc__ = """
Properties of the %s
Specify the font properties of the figure title manually.
Possible types
--------------
dict
Items may be any valid text property
See Also
--------
%s, %s, %s""" % (label_name, base.key, base.key + 'size',
base.key + 'weight')
children = cl_children
parents = cl_parents
dependencies = [base.key, base.key + 'size', base.key + 'weight'] + \
cl_dependencies
group = 'labels'
name = 'Font properties of ' + (base.name or base.key)
def __init__(self, *args, **kwargs):
super(LabelProps, self).__init__(*args, **kwargs)
self.default_props = {}
self._todefault = False
def set_value(self, value, validate=True, todefault=False):
self._todefault = todefault
super(LabelProps, self).set_value(value, validate, todefault)
def update(self, fontprops):
fontprops = fontprops.copy()
# store default font properties
try:
text = next(iter(getattr(self, base.key).texts))
except StopIteration:
return
# TODO: This handling of the default management is not really
# satisfying because you run into trouble when using alternate
# property names (e.g. if you use 'ha' and 'horizontalalignment'
# at the same time)
if not self._todefault:
for key in fontprops:
if key == 'bbox':
default = dict(facecolor='none', edgecolor='none')
else:
default = getattr(text, 'get_' + key)()
self.default_props.setdefault(key, default)
else:
fontprops = self.default_props.copy()
self.default_props.clear()
if 'size' not in fontprops and 'fontsize' not in fontprops:
fontprops['size'] = getattr(self, base.key + 'size').value
if 'weight' not in fontprops and 'fontweight' not in fontprops:
fontprops['weight'] = getattr(self, base.key + 'weight').value
for text in getattr(self, base.key).texts:
text.update(fontprops)
self._todefault = False
def get_fmt_widget(self, parent, project):
"""Get a widget with the different font weights"""
from psy_simple.widgets.texts import FontPropertiesWidget
return FontPropertiesWidget(
parent, self, next(iter(getattr(self, base.key).texts), None),
base)
return LabelProps(base.key + 'props')
class Title(TextBase, Formatoption):
"""
Show the title
Set the title of the plot.
%(replace_note)s
Possible types
--------------
str
The title for the :func:`~matplotlib.pyplot.title` function.
Notes
-----
This is the title of this specific subplot! For the title of the whole
figure, see the :attr:`figtitle` formatoption.
See Also
--------
figtitle, titlesize, titleweight, titleprops"""
name = 'Axes title'
def initialize_plot(self, value):
arr = self.data
self.texts = [self.ax.set_title(
self.replace(value, arr, attrs=self.enhanced_attrs))]
def update(self, value):
arr = self.data
self.texts[0].set_text(self.replace(
value, arr, attrs=self.enhanced_attrs))
class Figtitle(TextBase, Formatoption):
"""
Plot a figure title
Set the title of the figure.
%(replace_note)s
Possible types
--------------
str
The title for the :func:`~matplotlib.pyplot.suptitle` function
Notes
-----
- If the plotter is part of a :class:`psyplot.project.Project` and multiple
plotters of this project are on the same figure, the replacement
attributes (see above) are joined by a delimiter. If the
:attr:`delimiter` attribute of this :class:`Figtitle` instance is not
None, it will be used. Otherwise the rcParams['texts.delimiter'] item is
used.
- This is the title of the whole figure! For the title of this specific
subplot, see the :attr:`title` formatoption.
See Also
--------
title, figtitlesize, figtitleweight, figtitleprops"""
name = 'Figure title'
@property
def enhanced_attrs(self):
return self.get_fig_data_attrs()
def initialize_plot(self, s):
if s:
self.texts = [self.ax.get_figure().suptitle(
self.replace(s, self.plotter.data, self.enhanced_attrs))]
self.clear_other_texts()
else:
self.texts = [self.ax.get_figure().suptitle('')]
def update(self, s):
if s:
self.texts[0].set_text(self.replace(s, self.plotter.data,
self.enhanced_attrs))
self.clear_other_texts()
else:
self.texts[0].set_text('')
def clear_other_texts(self, remove=False):
"""Make sure that no other text is a the same position as this one
This method clears all text instances in the figure that are at the
same position as the :attr:`_text` attribute
Parameters
----------
remove: bool
If True, the Text instances are permanently deleted from the
figure, otherwise there text is simply set to ''"""
fig = self.ax.get_figure()
# don't do anything if our figtitle is the only Text instance
if len(fig.texts) == 1:
return
# iterate over a copy because Text instances may be removed on the fly
for text in list(fig.texts):
if text is self.texts[0]:
continue
if text.get_position() == self.texts[0].get_position():
if not remove:
text.set_text('')
else:
text.remove()
class Text(TextBase, Formatoption):
"""
Add text anywhere on the plot
This formatoption draws a text on the specified position on the figure.
%(replace_note)s
Possible types
--------------
str
If string s: this will be used as (1., 1., s, {'ha': 'right'}) (i.e. a
string in the upper right corner of the axes).
tuple or list of tuples (x,y,s[,coord.-system][,options]])
Each tuple defines a text instance on the plot. 0<=x, y<=1 are the
coordinates. The coord.-system can be either the data coordinates
(default, ``'data'``) or the axes coordinates (``'axes'``) or the
figure coordinates ('fig'). The string s finally is the text. options
may be a dictionary to format the appearance (e.g. ``'color'``,
``'fontweight'``, ``'fontsize'``, etc., see
:class:`matplotlib.text.Text` for possible keys).
To remove one single text from the plot, set (x,y,''[, coord.-system])
for the text at position (x,y)
empty list
remove all texts from the plot
See Also
--------
title, figtitle"""
name = 'Arbitrary text on the plot'
@property
def transform(self):
"""Dictionary containing the relevant transformations"""
ax = self.ax
return {'axes': ax.transAxes,
'fig': ax.get_figure().transFigure,
'data': ax.transData}
def __init__(self, *args, **kwargs):
Formatoption.__init__(self, *args, **kwargs)
#: texts that shall be removed when updating
self._texts_to_remove = set()
#: :class:`matplotlib.texts.Text` instances on the figure
self._texts = defaultdict(set)
def _remove_texttuple(self, pos):
"""Remove a texttuple from the value in the plotter
Parameters
----------
pos: tuple (x, y, cs)
x and y are the x- and y-positions and cs the coordinate system"""
for i, (old_x, old_y, s, old_cs, d) in enumerate(self.value):
if (old_x, old_y, old_cs) == pos:
self.value.pop(i)
return
raise ValueError("{0} not found!".format(pos))
def _update_texttuple(self, x, y, s, cs, d):
"""Update the text tuple at `x` and `y` with the given `s` and `d`"""
pos = (x, y, cs)
for i, (old_x, old_y, old_s, old_cs, old_d) in enumerate(self.value):
if (old_x, old_y, old_cs) == pos:
self.value[i] = (old_x, old_y, s, old_cs, d)
return
raise ValueError("No text tuple found at {0}!".format(pos))
def set_value(self, value, validate=True, todefault=False):
value = self.validate(value) if validate else value
# mark all texts for removing if value is empty
if not value or todefault:
with self.plotter.no_validation:
self.plotter[self.key] = []
for cs, texts in self._texts.items():
for t in texts:
pos = t.get_position()
self._texts_to_remove.add((pos[0], pos[1], cs))
# loop through texttuples to see whether one changed or has to be
# removed. x: x-coord, y: y-coord, s: string, cs: coord.-system,
# d: text params dictionary
for x, y, s, cs, d in value:
if not s:
try:
self._remove_texttuple((x, y, cs))
self._texts_to_remove.add((x, y, cs))
except ValueError:
pass
else:
try:
self._update_texttuple(x, y, s, cs, d)
except ValueError:
self.value.append((x, y, s, cs, d))
def update(self, value, texts_to_remove=None):
# remove texts
for (x, y, cs) in texts_to_remove or self._texts_to_remove:
for t in self._texts[cs]:
if (x, y) == t.get_position():
self._texts[cs].remove(t)
t.remove()
break
if self.plotter.replot:
value = self.value + value
# now update the old texts or create new ones
for x, y, s, cs, d in value:
if cs == 'fig':
s = self.replace(
s, self.plotter.data, self.get_fig_data_attrs(
d.pop('delimiter', None)))
else:
s = self.replace(s, self.plotter.data, self.enhanced_attrs)
found = False
for t in self._texts[cs]:
if (x, y) == t.get_position():
t.set_text(s)
t.update(d.copy())
found = True
break
if not found:
self._texts[cs].add(self.ax.text(
x, y, s, d.copy(), transform=self.transform[cs]))
def share(self, fmto, **kwargs):
"""Share the settings of this formatoption with other data objects
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to share the attributes with
``**kwargs``
Any other keyword argument that shall be passed to the update
method of `fmto`
Notes
-----
The Text formatoption sets the 'texts_to_remove' keyword to the
:attr:`_texts_to_remove` attribute of this instance (if not already
specified in ``**kwargs``"""
kwargs.setdefault('texts_to_remove', self._texts_to_remove)
super(Text, self).share(fmto, **kwargs)
def diff(self, value):
my_value = self.value
return (not len(value) and len(my_value)) or any(
val not in my_value for val in value)
def finish_update(self):
"""Clears the :attr:`_texts_to_remove` set"""
self._texts_to_remove.clear()
def remove(self):
for t in chain.from_iterable(six.itervalues(self._texts)):
t.remove()
self._texts.clear()
class Tight(Formatoption):
"""
Automatically adjust the plots.
If set to True, the plots are automatically adjusted to fit to the figure
limitations via the :func:`matplotlib.pyplot.tight_layout()` function.
Possible types
--------------
bool
True for automatic adjustment
Warnings
--------
There is no update method to undo what happened after this formatoption is
set to True!"""
group = 'axes'
name = 'Tight layout'
def update(self, value):
if value:
plt.sca(self.ax)
plt.tight_layout()
class BackgroundColor(Formatoption):
"""The background color for the matplotlib axes.
Possible types
--------------
'rc'
to use matplotlibs rc params
None
to use a transparent color
color
Any possible matplotlib color
"""
group = 'axes'
name = 'Background color of the plot'
def update(self, value):
if value == 'rc':
self.ax.patch.set_facecolor(plt.rcParams['axes.facecolor'])
self.ax.set_facecolor(plt.rcParams['axes.facecolor'])
elif value is None:
self.ax.patch.set_facecolor('none')
self.ax.set_facecolor('none')
else:
self.ax.patch.set_facecolor(value)
self.ax.set_facecolor(value)
def get_fmt_widget(self, parent, project):
from psy_simple.widgets.colors import BackGroundColorWidget
return BackGroundColorWidget(parent, self, project)
class ValueMaskBase(Formatoption):
"""Base class for masking formatoptions"""
priority = START
group = 'masking'
data_dependent = True
@abstractmethod
def mask_func(self):
"""The masking function that is called"""
return
def update(self, value):
if value is None:
pass
else:
for i, data in enumerate(self.iter_data):
self.set_data(self._mask_data(data, value), i)
def _mask_data(self, data, value):
data = data.copy(data=np.copy(data.values))
data.values[~np.isnan(data.values)] = self.mask_func(
data.values[~np.isnan(data.values)], value)
return data
class MaskLess(ValueMaskBase):
"""
Mask data points smaller than a number
Possible types
--------------
float
The floating number to mask below
See Also
--------
maskleq, maskgreater, maskgeq, maskbetween
"""
name = 'Mask less'
def mask_func(self, data, value):
data[data < value] = np.nan
return data
class MaskLeq(ValueMaskBase):
"""
Mask data points smaller than or equal to a number
Possible types
--------------
float
The floating number to mask below
See Also
--------
maskless, maskgreater, maskgeq, maskbetween
"""
name = 'Mask lesser than or equal'
def mask_func(self, data, value):
data[data <= value] = np.nan
return data
class MaskGreater(ValueMaskBase):
"""
Mask data points greater than a number
Possible types
--------------
float
The floating number to mask above
See Also
--------
maskless, maskleq, maskgeq, maskbetween
"""
name = 'Mask greater'
def mask_func(self, data, value):
data[data > value] = np.nan
return data
class MaskGeq(ValueMaskBase):
"""
Mask data points greater than or equal to a number
Possible types
--------------
float
The floating number to mask above
See Also
--------
maskless, maskleq, maskgreater, maskbetween
"""
name = 'Mask greater than or equal'
def mask_func(self, data, value):
data[data >= value] = np.nan
return data
class MaskBetween(ValueMaskBase):
"""
Mask data points between two numbers
Possible types
--------------
tuple of two floats
The lower and upper bounds; data points between them (inclusive) are masked
See Also
--------
maskless, maskleq, maskgreater, maskgeq
"""
name = 'Mask between two values'
def mask_func(self, data, value):
data[np.all([data >= value[0], data <= value[1]], axis=0)] = np.nan
return data
class Mask(Formatoption):
"""Mask the data where a certain condition is True
This formatoption can be used to mask the plotting data based on another
array. This array can be the name of a variable in the base dataset,
or it can be a numeric array. Note that the data needs to be on exactly
the same coordinates as the data shown here
Possible types
--------------
None
Apply no mask
str
The name of a variable in the base dataset to use.
- dimensions that are in the given `mask` but not in the visualized
base variable will be aggregated using :func:`numpy.any`
- if the given `mask` misses dimensions that are in the visualized
data (i.e. the data of this plotter), we broadcast the `mask` to
match the shape of the data
- dimensions that are in `mask` and the base variable, but not in the
visualized data will be matched against each other
str
The path to a netCDF file that shall be loaded
xr.DataArray or np.ndarray
An array that can be broadcasted to the shape of the data
"""
priority = START
group = 'masking'
name = "Apply a mask"
def update(self, value):
if value is None:
return
for i, data in enumerate(self.iter_data):
mask = self.load_mask(data, value)
new_data = data.where(mask.astype(bool))
new_data.psy.base = data.psy.base
new_data.psy.idims = data.psy.idims
self.set_data(new_data, i)
def diff(self, value):
try:
return bool(self.value != value)
except ValueError:
if hasattr(value, 'shape') and hasattr(self.value, 'shape'):
return ((value.shape != self.value.shape) |
(value != self.value).any())
else:
return True
def load_mask(self, data, value):
if isinstance(value, str) and value in data.psy.base:
mask = data.psy.base[value]
if not set(mask.dims).intersection(data.dims):
raise ValueError("No intersection between dimensions of mask "
f"{value}: {mask.dims}, and the data: "
f"{data.dims}")
elif isinstance(value, str):
try:
mask = open_dataset(value)
except Exception:
raise ValueError(
f"{value} is not in the base dataset of "
f"{data.psy.arr_name} and could not be loaded with "
f"psy.open_dataset({repr(value)})")
else:
available_vars = [
v for v in mask
if set(mask[v].dims).intersection(data.dims)]
if not available_vars:
raise ValueError(f"No variable in {value} has an overlap "
f"with the data dimensions {data.dims}")
else:
mask = mask[available_vars[0]]
else:
mask = value
base_var = next(data.psy.iter_base_variables)
# aggregate mask over dimensions that are not in the base variable
dims2agg = set(mask.dims).difference(set(base_var.dims))
if dims2agg:
mask = mask.any(list(dims2agg))
# select idims of mask
idims = {d: sl for d, sl in data.psy.idims.items()
if d in mask.dims and d not in data.dims}
if idims:
mask = mask.isel(**idims)
return mask
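# A hedged example of the broadcasting rules above (variable names assumed):
# if the visualized data has dims ('y', 'x') and the dataset variable
# 'land_mask' has dims ('time', 'y', 'x'), the extra 'time' dimension is
# collapsed with numpy.any before the mask is applied via data.where.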
class TitlesPlotter(Plotter):
"""Plotter class for labels"""
_rcparams_string = ['plotter.baseplotter.']
title = Title('title')
titlesize = label_size(title)
titleweight = label_weight(title)
titleprops = label_props(title)
figtitle = Figtitle('figtitle')
figtitlesize = label_size(figtitle, 'figure title')
figtitleweight = label_weight(figtitle, 'figure title')
figtitleprops = label_props(figtitle, 'figure title')
text = Text('text')
class BasePlotter(TitlesPlotter):
"""Base class with formatoptions for plotting on an matplotlib axes"""
_rcparams_string = ['plotter.baseplotter.']
tight = Tight('tight')
background = BackgroundColor('background')
maskless = MaskLess('maskless')
maskleq = MaskLeq('maskleq')
maskgreater = MaskGreater('maskgreater')
maskgeq = MaskGeq('maskgeq')
maskbetween = MaskBetween('maskbetween')
mask = Mask('mask')
| gpl-2.0 |
navierula/Research-Fall-2017 | minMaxCalc/start_again.py | 1 | 4221 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 23:06:12 2017
@author: navrajnarula
"""
#c = [0.5, 3, 6, 40, 90, 130.8, 129, 111, 8, 9, 0.01, 9, 40, 90, 130.1, 112, 108, 90, 77, 68, 0.9, 8, 40, 90, 92, 130.4]
#c= [0, 10, 11, 48, 50.5, 0.48, 17, 18, 23, 29, 33, 34.67, 50.1, 0.09, 7, 41, 45, 50]
#### METHOD 1: Create generator function
lst = [-0.5, 44, 90, 132.22, 129.6, 89, 67.91, 12.5, 11, 0.0006, 10.2,
67, 89.07, 100, 132.224, 129.88, 120.1, 100, 89.5, 75, 40, 9.8, -0.4,
0.1, 90, 99, 112, 132.22,
]
def get_groups(lst):
up = False
for i, (u, v) in enumerate(zip(lst, lst[1:])):
if up:
if v < u:
yield 'End', i, u
up = False
else:
if v > u:
yield 'Start', i, u
up = True
if up:
yield 'End', i + 1, lst[-1]
#print("METHOD 1:\n")
#for t in get_groups(lst):
# print(t)
#### METHOD 2: using numpy libraries
from scipy.signal import argrelextrema
import numpy as np
lst = [-0.5, 44, 90, 132.22, 129.6, 89, 67.91, 12.5, 11, 0.0006, 10.2, 67, 89.07, 100, 132.224, 129.88, 120.1, 100, 89.5, 75, 40, 9.8, -0.4, 0.1, 90, 99, 112, 132.22]
arr = np.array(lst)
#Find local minimas index, add zero in the beginning
minInd = np.insert(argrelextrema(arr, np.less),0,0)
# Find local maximas index, add the length of arr - 1 at the end
maxInd = np.append(argrelextrema(arr,np.greater),[len(lst)-1])
# numpy indexing and zip to combine the results
end_arr = list(zip(zip(minInd,arr[minInd]),zip(maxInd,arr[maxInd])))
##Printing the output
#print("\nMETHOD 2:\n")
#for i in end_arr:
# print('Start :' , i[0])
# print('End:', i[1],'\n')
#### METHOD 3: Sorting
load = lst
load.sort(key=float) # previously key = int
totals = []
for count, items in enumerate(load):
counter = count + 1
last_object = (counter, load[count], load[(len(load)-1) - count])
totals.append(last_object)
#our_totals = totals[:3]
#print("\nMETHOD 3:\n")
#print(our_totals)
###############################################
print("\nTrying on REAL data:\n")
import pandas as pd
# read in dataset
xl = pd.ExcelFile("data/130N_Cycles_1-47.xlsx")
df = xl.parse("Specimen_RawData_1")
df
# append data from load column to list
load = []
for item in df.index:
load.append(df["Round"][item])
#### METHOD 1: Create generator function
lst = load
def get_groups(lst):
up = False
for i, (u, v) in enumerate(zip(lst, lst[1:])):
if up:
if v < u:
yield 'End', i, u
up = False
else:
if v > u:
yield 'Start', i, u
up = True
if up:
yield 'End', i + 1, lst[-1]
print("METHOD 1:\n")
for t in get_groups(lst):
print(t)
#### METHOD 2: using numpy libraries
from scipy.signal import argrelextrema
import numpy as np
lst = load
arr = np.array(lst)
#Find local minimas index, add zero in the beginning
minInd = np.insert(argrelextrema(arr, np.less),0,0)
# Find local maximas index, add the length of arr - 1 at the end
maxInd = np.append(argrelextrema(arr,np.greater),[len(lst)-1])
# numpy indexing and zip to combine the results
end_arr = list(zip(zip(minInd,arr[minInd]),zip(maxInd,arr[maxInd])))
#Printing the output
#print("\nMETHOD 2:\n")
#count = 0
#for i in end_arr:
# if count <= 47:
# print('Start :' , i[0])
# print('End:', i[1],'\n')
# count += 1
#### METHOD 3: Sorting
load.sort(key=float) # previously key = int
totals = []
for count, items in enumerate(load):
counter = count + 1
last_object = (counter, load[count], load[(len(load)-1) - count])
totals.append(last_object)
#our_totals = totals[:47]
#print("\nMETHOD 3:\n")
#print(our_totals)
# save the output in the list, min and the max
# run it through my algorithm
# compare them separately in algorithm
# save rows in file --> rounding to 0 helps
# play with rounding to comma 1,
# compare it with the real data
# check both minimum and maximum
# do it manually for 2 - 3 different ones
# clean the data from all bad values
# use ROUND, not anything else!!!
# redownloaded dataset
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed-form formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
calliope-project/calliope | calliope/backend/pyomo/util.py | 1 | 6916 | """
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
"""
import logging
import re
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import xarray as xr
import pyomo.core as po
from calliope.core.util.tools import memoize
from calliope import exceptions
logger = logging.getLogger(__name__)
@memoize
def get_param(backend_model, var, dims):
"""
Get an input parameter held in a Pyomo object, or held in the defaults
dictionary if that Pyomo object doesn't exist.
Parameters
----------
backend_model : Pyomo model instance
var : str
dims : single value or tuple
"""
try:
return getattr(backend_model, var)[dims]
except AttributeError: # i.e. parameter doesn't exist at all
logger.debug(
"get_param: var {} and dims {} leading to default lookup".format(var, dims)
)
return backend_model.__calliope_defaults[var]
except KeyError: # try removing timestep
try:
if len(dims) > 2:
return getattr(backend_model, var)[dims[:-1]]
else:
return getattr(backend_model, var)[dims[0]]
except KeyError: # Static default value
logger.debug(
"get_param: var {} and dims {} leading to default lookup".format(
var, dims
)
)
return backend_model.__calliope_defaults[var]
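# A hypothetical illustration of the lookup order (names are assumed, not
# taken from a real model): for dims == ('region1::pv', timestep), a missing
# timestep-indexed entry falls back to the ('region1::pv',) entry, and a
# missing parameter altogether falls back to the static defaults dictionary.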
def get_previous_timestep(timesteps, timestep):
"""Get the timestamp for the timestep previous to the input timestep"""
return timesteps[timesteps.ord(timestep) - 1]
@memoize
def get_timestep_weight(backend_model):
"""
Get the total number of years this model considers, by summing all
timestep resolutions weighted by the timestep weights (a weight/resolution
of 1 = 1 hour) and dividing the sum by the number of hours in the year.
Weight/resolution will almost
always be 1 per step, unless time clustering/masking/resampling has taken place.
"""
time_res = [po.value(i) for i in backend_model.timestep_resolution.values()]
weights = [po.value(i) for i in backend_model.timestep_weights.values()]
return sum(np.multiply(time_res, weights)) / 8760
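# A worked example of the convention above: 8760 hourly steps, each with
# resolution 1 and weight 1, give sum(1 * 1) / 8760 == 1.0, i.e. exactly
# one model year; resampling to 2-hourly steps (resolution 2, 4380 steps)
# yields the same total.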
@memoize
def get_conversion_plus_io(backend_model, tier):
"""
from a carrier_tier, return the primary tier (of `in`, `out`) and
corresponding decision variable (`carrier_con` and `carrier_prod`, respectively)
"""
if "out" in tier:
return "out", backend_model.carrier_prod
elif "in" in tier:
return "in", backend_model.carrier_con
def get_var(backend_model, var, dims=None, sparse=False, expr=False):
"""
Return output for variable `var` as a pandas.Series (1d),
pandas.Dataframe (2d), or xarray.DataArray (3d and higher).
Parameters
----------
var : variable name as string, e.g. 'resource'
dims : list, optional
indices as strings, e.g. ('loc_techs', 'timesteps');
if not given, they are auto-detected
sparse : bool, optional; default = False
If extracting Pyomo Param data, the output sparse array includes inputs
the user left as NaN replaced with the default value for that Param.
"""
try:
var_container = getattr(backend_model, var)
except AttributeError:
raise exceptions.BackendError("Variable {} inexistent.".format(var))
if not dims:
if var + "_index" == var_container.index_set().name:
dims = [i.name for i in var_container.index_set().subsets()]
else:
dims = [var_container.index_set().name]
if sparse and not expr:
result = pd.Series(var_container.extract_values_sparse())
else:
if expr:
result = pd.Series(var_container._data).apply(
lambda x: po.value(x) if not invalid(x) else np.nan
)
else:
result = pd.Series(var_container.extract_values())
if result.empty:
raise exceptions.BackendError("Variable {} has no data.".format(var))
result = result.rename_axis(index=dims)
return xr.DataArray.from_series(result)
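# A hedged usage sketch (dimension names are illustrative):
# >>> get_var(backend_model, 'carrier_prod',
# ... dims=['loc_tech_carriers_prod', 'timesteps'])
# would return an xarray.DataArray over those two dimensions; sparse=True
# additionally fills Param entries the user left as NaN with their defaults.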
def loc_tech_is_in(backend_model, loc_tech, model_set):
"""
Check if set exists and if loc_tech is in the set
Parameters
----------
loc_tech : string
model_set : string
"""
if hasattr(backend_model, model_set) and loc_tech in getattr(
backend_model, model_set
):
return True
else:
return False
def get_domain(var: xr.DataArray) -> str:
def check_sign(var):
if re.match("resource|node_coordinates|cost*", var.name):
return ""
else:
return "NonNegative"
if var.dtype.kind == "b":
return "Boolean"
elif is_numeric_dtype(var.dtype):
return check_sign(var) + "Reals"
else:
return "Any"
def invalid(val) -> bool:
if isinstance(val, po.base.param._ParamData):
return val._value == po.base.param._NotValid or pd.isnull(po.value(val))
else:
return pd.isnull(val)
def datetime_to_string(
backend_model: po.ConcreteModel, model_data: xr.Dataset
) -> xr.Dataset:
"""
Convert from datetime to string xarray dataarrays, to reduce the memory
footprint of converting datetimes from numpy.datetime64 -> pandas.Timestamp
when creating the pyomo model object.
Parameters
----------
backend_model : the backend pyomo model object
model_data : the Calliope xarray Dataset of model data
"""
datetime_data = set()
for attr in ["coords", "data_vars"]:
for set_name, set_data in getattr(model_data, attr).items():
if set_data.dtype.kind == "M":
attrs = model_data[set_name].attrs
model_data[set_name] = model_data[set_name].dt.strftime(
"%Y-%m-%d %H:%M"
)
model_data[set_name].attrs = attrs
datetime_data.add((attr, set_name))
backend_model.__calliope_datetime_data = datetime_data
return model_data
def string_to_datetime(
backend_model: po.ConcreteModel, model_data: xr.Dataset
) -> xr.Dataset:
"""
Convert from string to datetime xarray dataarrays, reverting the process
undertaken in
datetime_to_string
Parameters
----------
backend_model : the backend pyomo model object
model_data : the Calliope xarray Dataset of model data
"""
for attr, set_name in backend_model.__calliope_datetime_data:
if attr == "coords" and set_name in model_data:
model_data.coords[set_name] = model_data[set_name].astype("datetime64[ns]")
elif set_name in model_data:
model_data[set_name] = (
model_data[set_name].fillna(pd.NaT).astype("datetime64[ns]")
)
return model_data
| apache-2.0 |
revanthkolli/osf.io | scripts/analytics/links.py | 55 | 1227 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from .utils import plot_dates, mkdirp
link_collection = database['privatelink']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_view_only_links():
dates = [
record['date_created']
for record in link_collection.find({}, {'date_created': True})
]
if not dates:
return
fig = plot_dates(dates)
plt.title('view-only links ({} total)'.format(len(dates)))
plt.savefig(os.path.join(FIG_PATH, 'view-only-links.png'))
plt.close()
def analyze_view_only_links_anonymous():
dates = [
record['date_created']
for record in link_collection.find(
{'anonymous': True},
{'date_created': True},
)
]
if not dates:
return
fig = plot_dates(dates)
plt.title('anonymous view-only links ({} total)'.format(len(dates)))
plt.savefig(os.path.join(FIG_PATH, 'view-only-links-anonymous.png'))
plt.close()
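# Both analyses follow the same pattern: pull date_created timestamps from the
# privatelink collection (optionally filtered to anonymous links), hand them to
# the shared plot_dates helper, and save the titled figure under
# ANALYTICS_PATH/figs/features.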
def main():
analyze_view_only_links()
analyze_view_only_links_anonymous()
if __name__ == '__main__':
main()
| apache-2.0 |
NSLS-II-HXN/PyXRF | pyxrf/gui_module/wd_preview_plot_spectrum.py | 1 | 5064 | from qtpy.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout,
QRadioButton, QButtonGroup, QComboBox)
from qtpy.QtCore import Slot
from .useful_widgets import set_tooltip, global_gui_variables
from ..model.lineplot import PlotTypes, EnergyRangePresets
from matplotlib.backends.backend_qt5agg import (
    FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
import logging
logger = logging.getLogger(__name__)
class PreviewPlotSpectrum(QWidget):
def __init__(self, *, gpc, gui_vars):
super().__init__()
# Global processing classes
self.gpc = gpc
# Global GUI variables (used for control of GUI state)
self.gui_vars = gui_vars
self.cb_plot_type = QComboBox()
self.cb_plot_type.addItems(["LinLog", "Linear"])
self.cb_plot_type.setCurrentIndex(self.gpc.get_preview_plot_type())
self.cb_plot_type.currentIndexChanged.connect(self.cb_plot_type_current_index_changed)
self.rb_selected_region = QRadioButton("Selected region")
self.rb_selected_region.setChecked(True)
self.rb_full_spectrum = QRadioButton("Full spectrum")
if self.gpc.get_preview_energy_range() == EnergyRangePresets.SELECTED_RANGE:
self.rb_selected_region.setChecked(True)
elif self.gpc.get_preview_energy_range() == EnergyRangePresets.FULL_SPECTRUM:
self.rb_full_spectrum.setChecked(True)
else:
logger.error("Spectrum preview: incorrect Enum value for energy range was used:\n"
" Report the error to the development team.")
self.btn_group_region = QButtonGroup()
self.btn_group_region.addButton(self.rb_selected_region)
self.btn_group_region.addButton(self.rb_full_spectrum)
self.btn_group_region.buttonToggled.connect(self.btn_group_region_button_toggled)
self.mpl_canvas = FigureCanvas(self.gpc.plot_model._fig_preview)
self.mpl_toolbar = NavigationToolbar(self.mpl_canvas, self)
# Keep layout without change when canvas is hidden (invisible)
sp_retain = self.mpl_canvas.sizePolicy()
sp_retain.setRetainSizeWhenHidden(True)
self.mpl_canvas.setSizePolicy(sp_retain)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(self.cb_plot_type)
hbox.addStretch(1)
hbox.addWidget(self.rb_selected_region)
hbox.addWidget(self.rb_full_spectrum)
vbox.addLayout(hbox)
vbox.addWidget(self.mpl_toolbar)
vbox.addWidget(self.mpl_canvas)
self.setLayout(vbox)
self._set_tooltips()
def _set_tooltips(self):
set_tooltip(self.cb_plot_type,
"Use <b>Linear</b> or <b>LinLog</b> axes to plot spectra")
set_tooltip(
self.rb_selected_region,
"Plot spectrum in the <b>selected range</b> of energies. The range may be set "
"in the 'Model' tab. Click the button <b>'Find Automatically ...'</b> "
"to set the range of energies before finding the emission lines. The range "
"may be changed in General Settings dialog (button <b>'General ...'</b>) at any time.")
set_tooltip(self.rb_full_spectrum,
"Plot full spectrum over <b>all available eneriges</b>.")
def update_widget_state(self, condition=None):
if condition == "tooltips":
self._set_tooltips()
self.mpl_toolbar.setVisible(self.gui_vars["show_matplotlib_toolbar"])
# Hide Matplotlib canvas during computations
state_compute = global_gui_variables["gui_state"]["running_computations"]
self.mpl_canvas.setVisible(not state_compute)
@Slot()
@Slot(bool)
def redraw_preview_plot(self):
# It is assumed that the plot is visible
self.gpc.update_preview_spectrum_plot()
def btn_group_region_button_toggled(self, button, checked):
if checked:
if button == self.rb_selected_region:
self.gpc.set_preview_energy_range(EnergyRangePresets.SELECTED_RANGE)
self.gpc.update_preview_spectrum_plot()
logger.debug("GUI: Display only selected region")
elif button == self.rb_full_spectrum:
self.gpc.set_preview_energy_range(EnergyRangePresets.FULL_SPECTRUM)
self.gpc.update_preview_spectrum_plot()
logger.debug("GUI: Display full spectrum")
else:
logger.error("Spectrum preview: unknown button was toggled. "
"Please, report the error to the development team.")
def cb_plot_type_current_index_changed(self, index):
try:
self.gpc.set_preview_plot_type(PlotTypes(index))
self.gpc.plot_model.update_preview_spectrum_plot()
except ValueError:
logger.error("Spectrum preview: incorrect index for energy range preset was detected.\n"
"Please report the error to the development team.")
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py | 70 | 375423 | """
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
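# Each channel above is a sequence of (x, y0, y1) triples: x runs monotonically
# from 0 to 1, y0 is the channel value approached from below x, and y1 is the
# value used from x upward (y0 != y1 produces the hard steps seen in _flag_data
# and _prism_data). A tiny map in the same format, as an illustrative sketch
# only (this black-to-red ramp is not part of the original set):
_example_ramp_data = {
    'red': ((0., 0., 0.), (1., 1., 1.)),
    'green': ((0., 0., 0.), (1., 0., 0.)),
    'blue': ((0., 0., 0.), (1., 0., 0.))
}
example_ramp = colors.LinearSegmentedColormap('example_ramp', _example_ramp_data, LUTSIZE)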
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
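# A short usage sketch: datad maps names to segment data, so variants can be
# rebuilt directly from it. For example, a reversed 'jet' (illustrative; each
# (x, y0, y1) triple is flipped and re-anchored at 1 - x):
_jet_r_data = dict(
    (channel, [(1.0 - x, y1, y0) for x, y0, y1 in reversed(seg)])
    for channel, seg in datad['jet'].items()
)
jet_r = colors.LinearSegmentedColormap('jet_r', _jet_r_data, LUTSIZE)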
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612, 0.15294118225574493,
0.15294118225574493), (0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896, 0.3803921639919281,
0.3803921639919281), (0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.60000002384185791, 0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994, 0.91372549533843994),
(0.80000001192092896, 0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435, 0.70588237047195435), (1.0,
0.58431375026702881, 0.58431375026702881)],
'green': [(0.0, 0.0, 0.0), (0.10000000149011612,
0.18823529779911041, 0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541), (0.30000001192092896,
0.68235296010971069, 0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.60000002384185791, 0.9529411792755127, 0.9529411792755127),
(0.69999998807907104, 0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342, 0.67843139171600342),
(0.89999997615814209, 0.45882353186607361, 0.45882353186607361),
(1.0, 0.21176470816135406, 0.21176470816135406)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898, 0.84313726425170898),
(0.20000000298023224, 0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545, 0.99215686321258545),
(0.40000000596046448, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104, 0.67058825492858887,
0.67058825492858887), (0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209, 0.27058824896812439,
0.27058824896812439), (1.0, 0.19215686619281769,
0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
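# Usage note: Set1/Set2/Set3 are qualitative Brewer palettes; their anchors
# sit at k/(n-1) for n discrete colors (n = 9 for Set1, 8 for Set2, 12 for
# Set3), so sampling the map at exactly n evenly spaced points recovers the
# discrete colors. A minimal sketch, assuming matplotlib.colors is
# importable in this context:
#
#     from matplotlib.colors import LinearSegmentedColormap
#     set2 = LinearSegmentedColormap('Set2', _Set2_data, N=8)
#     colors = [set2(i / 7.0) for i in range(8)]  # the 8 qualitative colors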
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
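
# Annotation (added, hedged — not part of the original data): each channel in
# the dicts above and below is a list of (x, y0, y1) anchors in Matplotlib's
# segmented-colormap convention. x increases monotonically from 0.0 to 1.0;
# for an input value falling between anchors i and i+1, the channel value is
# linearly interpolated from y1[i] to y0[i+1]. y0 and y1 differ only where a
# colormap has an intentional discontinuity at that anchor.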
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
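# ---------------------------------------------------------------------------
# Editor's note (hedged sketch, not part of the original matplotlib data):
# each *_data dict above is matplotlib "segmentdata".  For every channel
# ('red', 'green', 'blue') it lists sorted (x, y0, y1) anchor points on
# [0, 1]; between anchors the channel value is linearly interpolated, and
# y0 != y1 would introduce a jump at x (here y0 == y1 almost everywhere,
# so the maps are continuous).  LinearSegmentedColormap consumes such
# dicts directly, e.g.:
#
#     from matplotlib.colors import LinearSegmentedColormap
#     gist_heat = LinearSegmentedColormap('gist_heat', _gist_heat_data, N=256)
#     rgba = gist_heat(0.5)  # RGBA tuple at the midpoint of the colormap
# ---------------------------------------------------------------------------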
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
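# Format note (editor's addition, not part of the original tables): each
# channel list in these *_data dicts is matplotlib-style "segmentdata". An
# entry (x, y0, y1) means: at position x in [0, 1] the channel takes value
# y1 at and above x, while y0 is the value approached from below x. In all
# of the tables here y0 == y1, so every channel is a continuous piecewise-
# linear ramp; the x values are sorted and run from 0.0 to 1.0.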
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
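def _demo_build_gist_rainbow():
    """Hedged usage sketch (editor's addition, not part of the original
    tables): turn the segmentdata dict defined above into a colormap with
    the standard matplotlib.colors API and sample one color from it."""
    from matplotlib.colors import LinearSegmentedColormap

    # N=256 builds a 256-entry lookup table by piecewise-linear
    # interpolation between the (x, y0, y1) anchors of each channel.
    cmap = LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, N=256)

    # Calling the colormap with a float in [0, 1] returns an RGBA tuple.
    return cmap(0.5)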
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
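# The tables in this module follow matplotlib's LinearSegmentedColormap
# segment-data layout: for each channel ('red', 'green', 'blue') a sorted
# list of (x, y0, y1) anchors with x in [0, 1].  Between two anchors the
# channel value is interpolated from y1 of the left anchor to y0 of the
# right one, so y0 != y1 at a given x encodes a hard step in the ramp.
# A minimal sketch of turning the table below into a usable colormap
# (assumes matplotlib is importable in this context):
#
#     from matplotlib.colors import LinearSegmentedColormap
#     gist_yarg = LinearSegmentedColormap('gist_yarg', _gist_yarg_data, N=256)
#     # gist_yarg(0.0) is white and gist_yarg(1.0) is near-black:
#     # "yarg" is "gray" reversed, i.e. an inverted grayscale ramp.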
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
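# Register the raw segment data in datad so that the reversal loop further
# down can generate an '_r' variant for every named map.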
datad['Accent'] = _Accent_data
datad['Blues'] = _Blues_data
datad['BrBG'] = _BrBG_data
datad['BuGn'] = _BuGn_data
datad['BuPu'] = _BuPu_data
datad['Dark2'] = _Dark2_data
datad['GnBu'] = _GnBu_data
datad['Greens'] = _Greens_data
datad['Greys'] = _Greys_data
datad['Oranges'] = _Oranges_data
datad['OrRd'] = _OrRd_data
datad['Paired'] = _Paired_data
datad['Pastel1'] = _Pastel1_data
datad['Pastel2'] = _Pastel2_data
datad['PiYG'] = _PiYG_data
datad['PRGn'] = _PRGn_data
datad['PuBu'] = _PuBu_data
datad['PuBuGn'] = _PuBuGn_data
datad['PuOr'] = _PuOr_data
datad['PuRd'] = _PuRd_data
datad['Purples'] = _Purples_data
datad['RdBu'] = _RdBu_data
datad['RdGy'] = _RdGy_data
datad['RdPu'] = _RdPu_data
datad['RdYlBu'] = _RdYlBu_data
datad['RdYlGn'] = _RdYlGn_data
datad['Reds'] = _Reds_data
datad['Set1'] = _Set1_data
datad['Set2'] = _Set2_data
datad['Set3'] = _Set3_data
datad['Spectral'] = _Spectral_data
datad['YlGn'] = _YlGn_data
datad['YlGnBu'] = _YlGnBu_data
datad['YlOrBr'] = _YlOrBr_data
datad['YlOrRd'] = _YlOrRd_data
datad['gist_earth'] = _gist_earth_data
datad['gist_gray'] = _gist_gray_data
datad['gist_heat'] = _gist_heat_data
datad['gist_ncar'] = _gist_ncar_data
datad['gist_rainbow'] = _gist_rainbow_data
datad['gist_stern'] = _gist_stern_data
datad['gist_yarg'] = _gist_yarg_data
# Reverse all the colormaps; a reversed colormap has '_r' appended to its name.
def revcmap(data):
    """Return segment data with each channel mirrored about x = 0.5."""
    data_r = {}
    for key, val in data.items():
        valnew = [(1. - a, b, c) for a, b, c in reversed(val)]
        data_r[key] = valnew
    return data_r
# Take a snapshot of the keys first, because the loop below mutates datad.
cmapnames = list(datad.keys())
for cmapname in cmapnames:
    cmapname_r = cmapname + '_r'
    cmapdat_r = revcmap(datad[cmapname])
    datad[cmapname_r] = cmapdat_r
    locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
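# Usage sketch (assuming this module is imported as matplotlib.cm): every map
# is now available in both directions, e.g. cm.Blues and cm.Blues_r, and datad
# holds the raw segment data under both names.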
| agpl-3.0 |
genialis/resolwe-bio | resolwe_bio/tools/expression_rpkum.py | 1 | 3076 | #!/usr/bin/env python3
"""Normalize expressions to RPKUM."""
import argparse
import sys
from os.path import basename
import pandas as pd
from resolwe_runtime_utils import error, send_message
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="Normalize expressions using RPKUM.")
parser.add_argument("-c", "--counts", required=True, help="Raw counts file.")
parser.add_argument("-m", "--mappability", required=True, help="Mappability file.")
parser.add_argument("-o", "--output", required=True, help="Output file name.")
return parser.parse_args()
def parse_expression_file(exp_file):
"""Parse expression file to a Pandas Series."""
try:
expression = pd.read_csv(
exp_file,
sep="\t",
compression="gzip",
usecols=["Gene", "Expression"],
index_col="Gene",
dtype={
"Gene": str,
"Expression": float,
},
squeeze=True,
)
return expression.dropna()
except (ValueError, OSError) as parse_error:
send_message(
error(
"Failed to read input file {}. {}".format(
basename(exp_file), parse_error
)
)
)
sys.exit(1)
def parse_mapability_file(mapability_file):
"""Parse mapability file to a Pandas Series."""
try:
mappability = pd.read_csv(
mapability_file,
sep="\t",
usecols=["gene_id", "coverage"],
index_col="gene_id",
dtype={
"gene_id": str,
"coverage": float,
},
squeeze=True,
)
return mappability.dropna()
except (ValueError, OSError) as parse_error:
send_message(
error(
"Failed to read mappability file {}. {}".format(
basename(mapability_file), parse_error
)
)
)
sys.exit(1)
def main():
"""Invoke when run directly as a program."""
args = parse_arguments()
mappability = parse_mapability_file(args.mappability)
expression = parse_expression_file(args.counts)
missing_genes = expression.index.difference(mappability.index)
if len(missing_genes) > 0:
send_message(
error(
"Feature ID {} is not present in the mappability file. "
"Make sure that the expressions and mappability file are "
"derived from the same annotations (GTF/GFF) file.".format(
missing_genes[0]
)
)
)
sys.exit(1)
lib_size = expression.sum()
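    # RPKUM = 10^9 * raw_count / (library_size * mappability), where the
    # mappability "coverage" (presumably the number of uniquely mappable
    # positions per gene) stands in for gene length; the 10^9 factor combines
    # the per-kilobase and per-million-reads scalings.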
result = 10 ** 9 * expression / lib_size / mappability
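    # Genes with zero mappable coverage would otherwise divide to inf; report
    # their expression as 0 instead.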
result[mappability == 0] = 0.0
result.loc[expression.index].to_csv(
args.output,
index_label="Gene",
header=["Expression"],
sep="\t",
compression="gzip",
)
if __name__ == "__main__":
main()
| apache-2.0 |
guaix-ucm/megaradrp | tools/main_plus_poly.py | 2 | 5041 | from __future__ import print_function
import glob
import shutil
import json
import pandas as pd
import os
def copy_from_folder():
for base in glob.glob("*"):
print(base)
for file in glob.glob("%s/*.json" % base):
full_file_name = "%s" % file
dest = "%s.json" % base
print("\t %s" % full_file_name)
shutil.copy(full_file_name, dest)
def read_json(name):
lin_lrv = [6163.59390, 6150.29600, 6143.06260, 6128.44990, 6096.16310,
6074.33770, 6029.99690, 5975.53400, 5944.83420, 5913.63100,
5881.89520, 5852.48790, 5433.64990, 5418.55770, 5400.56180,
5341.09320]
lin_lri = [7272.935, 7372.128, 7383.980, 7435.488, 7503.868, 7514.651,
7635.105, 7723.984, 7891.040, 7948.176, 7979.004, 8006.156,
8014.785, 8053.307, 8103.692, 8115.311, 8264.521, 8330.425,
8408.209, 8424.647, 8521.441, 8605.768, 8620.491, 8667.943]
lin_lrz = [8046.11, 8136.40, 8205.11, 8266.08, 8320.86, 8358.72, 8421.22,
8446.51, 8582.90, 8621.30, 8709.23, 8812.51, 8865.31, 9148.67,
9276.27, 9459.21, 9547.74]
lin_lrr = [7298.1436, 7173.3726, 7059.5254, 6955.3149, 6876.2925,
6658.6772, 6524.7627, 6412.5918, 6337.6206, 6326.3667,
6214.4413, 6098.8032, 6090.1079, 6020.1191, 6013.2817]
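    # Reference arc-line wavelengths (in Angstrom, judging by the values) for
    # the MEGARA low-resolution setups LRV, LRI, LRZ and LRR listed above.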
with open('%s.json' % name) as data_file:
data = json.load(data_file)
c0 = []
c1 = []
c2 = []
c3 = []
c4 = []
c5 = []
xpos = []
ypos = []
fwhm = []
wave = []
ref = []
list_fib = []
list_coeffs = []
list_ind = []
fibra = 1
contents = data['contents']
if isinstance(contents, dict):
conts = contents.values()
else:
conts = contents
for elem in conts:
        list_coeffs = elem['coeff']
for linea in elem['features']:
xpos.append(linea['xpos'])
ypos.append(linea['ypos'])
fwhm.append(linea['fwhm'])
wave.append(linea['wavelength'])
ref.append(linea['reference'])
c0.append(list_coeffs[0])
c1.append(list_coeffs[1])
c2.append(list_coeffs[2])
c3.append(list_coeffs[3])
c4.append(list_coeffs[4])
c5.append(list_coeffs[5])
list_fib.append(fibra)
if 'LRZ' in name:
list_ind.append(lin_lrz.index(linea['reference']) + 1)
elif 'LRV' in name:
list_ind.append(lin_lrv.index(linea['reference']) + 1)
            elif 'LRR' in name:
                try:
                    list_ind.append(lin_lrr.index(linea['reference']) + 1)
                except ValueError:  # reference line missing from the LRR list
                    list_ind.append(0)
            else:
                try:
                    list_ind.append(lin_lri.index(linea['reference']) + 1)
                except ValueError:  # reference line missing from the LRI list
                    list_ind.append(0)
fibra += 1
data = {
'xpos': xpos,
'ypos': ypos,
'fwhm': fwhm,
'wavelength': wave,
'reference': ref,
'fibra': list_fib,
'line': list_ind,
'zc0': c0,
'zc1': c1,
'zc2': c2,
'zc3': c3,
'zc4': c4,
'zc5': c5
}
df = pd.DataFrame(data)
    # Create a Pandas Excel writer using XlsxWriter as the engine.
    excel = pd.ExcelWriter('%s.xlsx' % name, engine='xlsxwriter')
    df.to_excel(excel, sheet_name='Sheet1', index=False)
    excel.save()  # flush the workbook to disk; otherwise the .xlsx is never written
    # to_csv writes straight to disk and returns None, so no assignment is needed
    df.to_csv('%s.csv' % name, sep=' ', index=False, encoding='utf-8')
def generar_xls():
lista_ficheros = []
for base in glob.glob("*.json"):
file_name = base.split('.')[0]
lista_ficheros.append(file_name)
read_json(file_name)
lista = {'LRR':[],
'LRV':[],
'LRZ':[],
'sci_LRR':[],
'sci_LRV':[],
'sci_LRZ':[],
'sci_LRI':[],
}
for elem in lista_ficheros:
if 'LRZ' in elem:
if 'sci' in elem:
lista['sci_LRZ'].append(elem)
else:
lista['LRZ'].append(elem)
elif 'LRR' in elem:
if 'sci' in elem:
lista['sci_LRR'].append(elem)
else:
lista['LRR'].append(elem)
elif 'LRI' in elem:
if 'sci' in elem:
lista['sci_LRI'].append(elem)
else:
lista['LRI'].append(elem)
else:
if 'sci' in elem:
lista['sci_LRV'].append(elem)
else:
lista['LRV'].append(elem)
for elem in lista:
lista[elem].sort()
        if lista[elem]:
            with open('%s.txt' % elem, 'w') as out:
                # "with" guarantees the file is closed even on errors
                for item in lista[elem]:
                    out.write("%s\n" % item)
os.chdir(".")
generar_xls()
print('**************************Fin*************************')
| gpl-3.0 |
adamcandy/qgis-plugins-meshing-initial | dev/old/shape/extractPoints.py | 3 | 6740 | """
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
Sets up the input and output file streams according to the command
line arguments:
  1. the shapefile whose shapes are to be clipped to the boundary,
  2. the shapefile containing the boundary polygon(s),
  3. the file path the clipped shapes are written to,
  4. the minimum area below which clipped shapes are discarded.
"""
from shapely.ops import cascaded_union
import shapefile
from shapely.geometry import *
import sys
import matplotlib.pyplot as pyplot
"""
Write the results to .shp.
"""
def writeShapeFile(points, filepath):
    # begin the instance of the writer class
    w = shapefile.Writer()
    # keep shapes and records balanced automatically
    w.autobalance = 1
    i = 0
    for l in points:
        pList = []
        pList.append(l)
        if len(l) == 1:
            # a single coordinate pair: index into the geometry itself
            w.point(l[0][0], l[0][1])
            w.field("%d_FLD" % i, "C", "40")
            i += 1
        elif len(l) == 2:
            w.line(parts=pList)
            w.field("%d_FLD" % i, "C", "40")
            i += 1
        else:
            w.poly(parts=pList)
            w.field("%d_FLD" % i, "C", "40")
            i += 1
    w.save(filepath)
    print("Number of shapes written: %d" % i)
"""
Used if there is only one shape in the boundary shapefile.
"""
def getBoundaryPointsList(bounds):
    shapes = bounds.shapes()
    pointsList = shapes[0].points
    polygon = Polygon(pointsList)
    return (polygon, pointsList)
"""
Used when the boundary shapefile contains more than one shape: works out
which objects overlap, joins them, and returns the exterior coords of the
joined shape.
"""
def overlap(bounds, plot=False):
# Read shapefile and work out overlap. Optional plot.
shapes = bounds.shapes()
pointsList = []
    for i in range(len(shapes)):
        # Collect the coordinate list of each shape.
        pointsList.append(shapes[i].points)
# Turn the points into polygons.
polygons = []
for j in range(len(pointsList)):
polygons.append(Polygon([pointsList[j][i] for i in range(len(pointsList[j]))]))
# Add overlapping shapes into a list so we know which to join together.
overlapping = []
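    # Note: only consecutive shapes are compared below, so this assumes
    # overlapping polygons are adjacent in the shapefile's record order.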
for n in range(len(polygons) - 1):
if polygons[n].intersects(polygons[n+1]) == True:
# Two if statements to make sure the same polygon isn't being entered more than once.
if polygons[n] not in overlapping: overlapping.append(polygons[n])
if polygons[n + 1] not in overlapping: overlapping.append(polygons[n + 1])
# Create a new shape from the overlapping shapes.
join = cascaded_union(overlapping)
poly = [join]
# Take the coords. of the perimeter of this new shape.
coords = []
for i in range(len(join.exterior.coords)):
coords.append(list(join.exterior.coords[i]))
# Plot results if True. Store x-y coords of the perimeter in two lists to plot.
    if plot:
x = []; y = []
for i in range(len(coords)):
x.append(coords[i][0]); y.append(coords[i][1])
# Plot results.
pyplot.plot(x, y)
pyplot.xlim(-4, 4)
pyplot.ylim(-4, 4)
pyplot.show()
return join, coords
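# Usage sketch: given a shapefile.Reader with several overlapping shapes,
#   joined_polygon, perimeter_coords = overlap(bounds, plot=False)
# returns the cascaded union and the coords of its exterior ring.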
"""
Output the final results as a .geo file.
"""
def write_geo (coords, filename):
# Write new shape to .geo.
    print(coords)
target = open("%s.geo" % filename, "w") # Creates .geo file to write to.
for i in range(len(coords)):
# Write point.
target.write('Point(%d) = {%.3f, %.3f, 0, %.3f};\n' %(i + 1, coords[i][0], coords[i][1], 1.0))
# Write the lines connecting the sequential points.
if (i + 1 > 1): target.write('Line(%d) = {%d, %d};\n' % (i, i, i + 1))
# Connect first and last points.
target.write('Line(%d) = {%d, %d};\n' % (i + 1, 1, i + 1))
target.close()
return False
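# Usage sketch: write_geo(coords, "domain") emits Gmsh-style geometry in
# "domain.geo", one Point per vertex plus Lines closing the loop.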
assert len(sys.argv) == 5, "Incorrect number of arguments passed"
readPath = sys.argv[1]
boundaryPath = sys.argv[2]
writePath = sys.argv[3]
areaThreshold = float(sys.argv[4])
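# Example invocation (hypothetical file names):
#   python extractPoints.py input.shp boundary.shp clipped 0.05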
#input stream for the given shape
sf = shapefile.Reader(readPath)
#shapes contained in the given file
shapes = sf.shapes();
#boundary = bounds.shapes[0]
boundary = shapefile.Reader(boundaryPath)
if len(boundary.shapes()) > 1:
    # overlap() already returns a flat list of coords, so no extra nesting is
    # needed for the boundary-plotting loop at the bottom of the script
    boundaryPolygons, boundaryPointList = overlap(boundary)
else:
    boundaryPolygons, boundaryPointList = getBoundaryPointsList(boundary)
"""
Takes shape from shapefile and converts to a Shapely Polygon. Checks if this polygon lies within the boundary using a.intersect(b). If it does it will perform a.intersection(b) operation returning a Polygon/MultiPolygon which lies within the boundary and then plots result.
"""
shapeList = []
for shape in shapes:
x = []; y = []; shp = []
polygon = Polygon([shape.points[i] for i in range(len(shape.points))])
if (polygon.intersects(boundaryPolygons)):
intersection = boundaryPolygons.intersection(polygon)
if intersection.area >= areaThreshold:
if intersection.geom_type == 'Polygon':
for i in range(len(list(intersection.exterior.coords))):
x.append(intersection.exterior.coords[i][0]); y.append(intersection.exterior.coords[i][1])
pyplot.plot(x, y)
shp.append([intersection.exterior.coords[i][0], intersection.exterior.coords[i][1]])
if intersection.geom_type == 'MultiPolygon':
for j in range(len(intersection)):
for i in range(len(list(intersection[j].exterior.coords))):
x.append(intersection[j].exterior.coords[i][0]); y.append(intersection[j].exterior.coords[i][1])
pyplot.plot(x, y)
shp.append([intersection[j].exterior.coords[i][0], intersection[j].exterior.coords[i][1]])
shapeList.append(shp)
writeShapeFile(shapeList, writePath)
# Plot boundary.
x=[]
y=[]
for i in range(len(boundaryPointList)):
x.append(boundaryPointList[i][0]); y.append(boundaryPointList[i][1])
pyplot.plot(x,y)
pyplot.xlim(min(x)-1,max(x)+1)
pyplot.ylim(min(y)-1,max(y)+1)
pyplot.show()
| lgpl-2.1 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tseries/tests/test_timedeltas.py | 7 | 78075 | # pylint: disable-msg=E1101,W0612
from __future__ import division
from datetime import timedelta, time
import nose
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, Timedelta,
TimedeltaIndex, isnull, date_range,
timedelta_range, Int64Index)
from pandas.compat import range
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal, assert_index_equal)
from pandas.tseries.offsets import Day, Second
import pandas.util.testing as tm
from numpy.random import randn
from pandas import _np_version_under1p8
iNaT = tslib.iNaT
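# iNaT is the int64 sentinel (the minimum int64 value) that pandas uses
# internally to represent NaT in datetime64/timedelta64 data.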
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
self.assertEqual(tidx.get_loc(pd.NaT), 1)
self.assertEqual(tidx.get_loc(None), 1)
self.assertEqual(tidx.get_loc(float('nan')), 1)
self.assertEqual(tidx.get_loc(np.nan), 1)
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertFalse((v in td))
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
self.assertTrue((v in td))
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10, unit='d').value, expected)
self.assertEqual(Timedelta(10.0, unit='d').value, expected)
self.assertEqual(Timedelta('10 days').value, expected)
self.assertEqual(Timedelta(days=10).value, expected)
self.assertEqual(Timedelta(days=10.0).value, expected)
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta('10 days 00:00:10').value, expected)
self.assertEqual(Timedelta(days=10, seconds=10).value, expected)
self.assertEqual(
Timedelta(days=10, milliseconds=10 * 1000).value, expected)
self.assertEqual(
Timedelta(days=10, microseconds=10 * 1000 * 1000).value, expected)
# test construction with np dtypes
# GH 8757
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1,
npkwarg).astype('m8[ns]').view('i8')
self.assertEqual(
Timedelta(**{pykwarg: npdtype(1)}).value, expected)
# rounding cases
self.assertEqual(Timedelta(82739999850000).value, 82739999850000)
self.assertTrue('0 days 22:58:59.999850' in str(Timedelta(
82739999850000)))
self.assertEqual(Timedelta(123072001000000).value, 123072001000000)
self.assertTrue('1 days 10:11:12.001' in str(Timedelta(
123072001000000)))
# string conversion with/without leading zero
# GH 9570
self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0))
self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1))
self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1))
# more strings & abbrevs
# GH 8190
self.assertEqual(Timedelta('1 h'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hour'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hr'), timedelta(hours=1))
self.assertEqual(Timedelta('1 hours'), timedelta(hours=1))
self.assertEqual(Timedelta('-1 hours'), -timedelta(hours=1))
self.assertEqual(Timedelta('1 m'), timedelta(minutes=1))
self.assertEqual(Timedelta('1.5 m'), timedelta(seconds=90))
self.assertEqual(Timedelta('1 minute'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 minutes'), timedelta(minutes=1))
self.assertEqual(Timedelta('1 s'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 second'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 seconds'), timedelta(seconds=1))
self.assertEqual(Timedelta('1 ms'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 milli'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 millisecond'), timedelta(milliseconds=1))
self.assertEqual(Timedelta('1 us'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 micros'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1 microsecond'), timedelta(microseconds=1))
self.assertEqual(Timedelta('1.5 microsecond'),
Timedelta('00:00:00.000001500'))
self.assertEqual(Timedelta('1 ns'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nano'), Timedelta('00:00:00.000000001'))
self.assertEqual(Timedelta('1 nanosecond'),
Timedelta('00:00:00.000000001'))
# combos
self.assertEqual(Timedelta('10 days 1 hour'),
timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h'), timedelta(days=10, hours=1))
self.assertEqual(Timedelta('10 days 1 h 1m 1s'), timedelta(
days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s'), -
timedelta(days=10, hours=1, minutes=1, seconds=1))
self.assertEqual(Timedelta('-10 days 1 h 1m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=1, microseconds=3))
self.assertEqual(Timedelta('-10 days 1 h 1.5m 1s 3us'), -
timedelta(days=10, hours=1, minutes=1,
seconds=31, microseconds=3))
# currently invalid as it has a - on the hhmmdd part (only allowed on
# the days)
self.assertRaises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
self.assertRaises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
self.assertRaises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assertRaisesRegexp(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assertRaisesRegexp(ValueError, "unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assertRaisesRegexp(ValueError,
"cannot construct a Timedelta from the passed "
"arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# roundtripping both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
self.assertEqual(Timedelta(td.value), td)
# str does not normally display nanos
if not td.nanoseconds:
self.assertEqual(Timedelta(str(td)), td)
self.assertEqual(Timedelta(td._repr_base(format='all')), td)
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
self.assertEqual(Timedelta(10.5, unit='s').value, expected)
# nat
self.assertEqual(Timedelta('').value, iNaT)
self.assertEqual(Timedelta('nat').value, iNaT)
self.assertEqual(Timedelta('NAT').value, iNaT)
self.assertEqual(Timedelta(None).value, iNaT)
self.assertEqual(Timedelta(np.nan).value, iNaT)
self.assertTrue(isnull(Timedelta('nat')))
# offset
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Hour(2)),
Timedelta('0 days, 02:00:00'))
self.assertEqual(Timedelta(pd.offsets.Second(2)),
Timedelta('0 days, 00:00:02'))
# unicode
# GH 11995
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
self.assertEqual(result, expected)
self.assertEqual(to_timedelta(pd.offsets.Hour(2)),
Timedelta(u'0 days, 02:00:00'))
self.assertRaises(ValueError, lambda: Timedelta(u'foo bar'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
self.assertEqual(r1, s1)
r2 = t2.round(freq)
self.assertEqual(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
self.assertRaises(ValueError, lambda: t1.round(freq))
def test_repr(self):
self.assertEqual(repr(Timedelta(10, unit='d')),
"Timedelta('10 days 00:00:00')")
self.assertEqual(repr(Timedelta(10, unit='s')),
"Timedelta('0 days 00:00:10')")
self.assertEqual(repr(Timedelta(10, unit='ms')),
"Timedelta('0 days 00:00:00.010000')")
self.assertEqual(repr(Timedelta(-10, unit='ms')),
"Timedelta('-1 days +23:59:59.990000')")
def test_identity(self):
td = Timedelta(10, unit='d')
self.assertTrue(isinstance(td, Timedelta))
self.assertTrue(isinstance(td, timedelta))
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
self.assertTrue(td == Timedelta(pydt))
self.assertEqual(td, pydt)
self.assertTrue(isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
self.assertEqual(td, np.timedelta64(td.value, 'ns'))
td64 = td.to_timedelta64()
self.assertEqual(td64, np.timedelta64(td.value, 'ns'))
self.assertEqual(td, td64)
self.assertTrue(isinstance(td64, np.timedelta64))
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
self.assertTrue(td != td.to_pytimedelta())
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_freq_conversion(self):
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
self.assertEqual(result, td.value / float(86400 * 1e9))
result = td / np.timedelta64(1, 's')
self.assertEqual(result, td.value / float(1e9))
result = td / np.timedelta64(1, 'ns')
self.assertEqual(result, td.value)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_compare_timedelta_series(self):
        # regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for l, r in [(td, 'a'), ('a', td)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
self.assertFalse(l == r)
self.assertTrue(l != r)
def test_fields(self):
def check(value):
# check that the value is int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
self.assertEqual(rng.days, 1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 0)
self.assertEqual(rng.nanoseconds, 0)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
self.assertEqual(abs(td), Timedelta('13:48:48'))
self.assertTrue(str(td) == "-1 days +10:11:12")
self.assertEqual(-td, Timedelta('0 days 13:48:48'))
self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000)
self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000)
rng = to_timedelta('-1 days, 10:11:12.100123456')
self.assertEqual(rng.days, -1)
self.assertEqual(rng.seconds, 10 * 3600 + 11 * 60 + 12)
self.assertEqual(rng.microseconds, 100 * 1000 + 123)
self.assertEqual(rng.nanoseconds, 456)
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# components
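# pandas normalizes a negative Timedelta to a negative day count plus
# positive sub-day fields, so -1 us reads as -1 days + 23:59:59.999999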
tup = pd.to_timedelta(-1, 'us').components
self.assertEqual(tup.days, -1)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
self.assertEqual(tup.days, -2)
self.assertEqual(tup.hours, 23)
self.assertEqual(tup.minutes, 59)
self.assertEqual(tup.seconds, 59)
self.assertEqual(tup.milliseconds, 999)
self.assertEqual(tup.microseconds, 999)
self.assertEqual(tup.nanoseconds, 0)
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0, 'ns'))
self.assertEqual(ct(10), np.timedelta64(10, 'ns'))
self.assertEqual(ct(10, unit='ns'), np.timedelta64(
10, 'ns').astype('m8[ns]'))
self.assertEqual(ct(10, unit='us'), np.timedelta64(
10, 'us').astype('m8[ns]'))
self.assertEqual(ct(10, unit='ms'), np.timedelta64(
10, 'ms').astype('m8[ns]'))
self.assertEqual(ct(10, unit='s'), np.timedelta64(
10, 's').astype('m8[ns]'))
self.assertEqual(ct(10, unit='d'), np.timedelta64(
10, 'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
self.assertEqual(ct(timedelta(seconds=1)),
np.timedelta64(1, 's').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)),
np.timedelta64(1, 'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)),
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10, 'ns'))
self.assertEqual(ct('100'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100, 'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000, 'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000, 'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000, 'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000, 'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000, 'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000, 'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000, 'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000, 'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000, 'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000, 'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1, 'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10, 'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100, 'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000, 'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000, 'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000, 'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000, 'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1, 's')))
self.assertEqual(ct('06:00:01'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.0'), conv(
np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('06:00:01.01'), conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
self.assertEqual(ct('- 1days, 00:00:01'),
conv(-d1 + np.timedelta64(1, 's')))
self.assertEqual(ct('1days, 06:00:01'), conv(
d1 + np.timedelta64(6 * 3600 + 1, 's')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
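# the strings 'nat'/'nan' coerce to iNaT, the int64 sentinel backing NaT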
self.assertEqual(to_timedelta(
'nat', box=False).astype('int64'), tslib.iNaT)
self.assertEqual(to_timedelta(
'nan', box=False).astype('int64'), tslib.iNaT)
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003', box=False),
conv(d1 + np.timedelta64(6 * 3600 +
5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
self.assertEqual(to_timedelta('15.5us', box=False),
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
self.assertEqual(result.astype('int64'), tslib.iNaT)
result = to_timedelta(['', ''])
self.assertTrue(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=False
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
self.assertEqual(result, expected)
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assertRaisesRegexp(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
self.assertRaises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
self.assertRaises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
self.assertRaises(ValueError, lambda: to_timedelta(time(second=1)))
self.assertTrue(to_timedelta(
time(second=1), errors='coerce') is pd.NaT)
self.assertRaises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
self.assertEqual(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float, so there might be some loss of
# precision
self.assertTrue(np.allclose(result.value / 1000, expected.value /
1000))
# sum
self.assertRaises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
self.assertRaises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
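# no exception here: 1000 hourly deltas sum to ~1.8e18 ns, within int64 range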
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
actual = pd.to_timedelta(pd.NaT)
self.assertEqual(actual.value, timedelta_NaT.astype('int64'))
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
self.assertEqual(result, expected)
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
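# 1 week + 1 day + 1 h + 1 min + 1 s = 694861 s; adding 1 ms, 1 us and
# 1 ns gives 694861001001001 ns in total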
expected = Timedelta(694861001001001)
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
self.assertEqual(result, expected)
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
self.assertEqual(result, expected)
self.assertRaises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = self.round_trip_pickle(v)
self.assertEqual(v, v_p)
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
self.assertEqual(hash(v), hash(td))
d = {td: 2}
self.assertEqual(d[v], 2)
tds = timedelta_range('1 second', periods=20)
self.assertTrue(all(hash(td) == hash(td.to_pytimedelta()) for td in
tds))
# python timedeltas drop ns resolution
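# (Timedelta(1, 'ns').to_pytimedelta() rounds to timedelta(0), so the
# hashes must differ)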
ns_td = Timedelta(1, 'ns')
self.assertNotEqual(hash(ns_td), hash(ns_td.to_pytimedelta()))
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
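# (np.iinfo(np.int64).min itself is reserved as the iNaT sentinel, hence + 1)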
self.assertTrue(min_td.value == np.iinfo(np.int64).min + 1)
self.assertTrue(max_td.value == np.iinfo(np.int64).max)
# One step beyond the lower limit yields NaT before the OverflowError
self.assertIsInstance(min_td - Timedelta(1, 'ns'),
pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
min_td - Timedelta(2, 'ns')
with tm.assertRaises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
self.assertIsInstance(td, pd.tslib.NaTType)
with tm.assertRaises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with tm.assertRaises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = self.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assertRaisesRegexp(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assertIsInstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
self.assertTrue((result['B'] == td).all())
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
self.assert_numpy_array_equal(rng.days, np.array(
[1, 1], dtype='int64'))
self.assert_numpy_array_equal(
rng.seconds,
np.array([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
self.assert_numpy_array_equal(rng.microseconds, np.array(
[100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
self.assert_numpy_array_equal(rng.nanoseconds, np.array(
[456, 456], dtype='int64'))
self.assertRaises(AttributeError, lambda: rng.hours)
self.assertRaises(AttributeError, lambda: rng.minutes)
self.assertRaises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), np.array(expt))
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_total_seconds_scalar(self):
# GH 10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
self.assertTrue(np.isnan(rng.total_seconds()))
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
self.assertFalse(result.iloc[0].isnull().all())
self.assertTrue(result.iloc[1].isnull().all())
def test_constructor(self):
expected = TimedeltaIndex(['1 days', '1 days 00:00:05', '2 days',
'2 days 00:00:02', '0 days 00:00:03'])
result = TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
tm.assert_index_equal(result, expected)
# unicode
result = TimedeltaIndex([u'1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01',
'0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05',
'0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(
['0 days 00:00:00.400', '0 days 00:00:00.450',
'0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'),
expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
self.assert_index_equal(rng, exp)
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
periods='foo', freq='D')
self.assertRaises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
self.assertRaises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
self.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1, 2, 3], unit='d')
self.assert_index_equal(result, expected)
from_ints = TimedeltaIndex(expected.asi8)
self.assert_index_equal(from_ints, expected)
# non-conforming freq
self.assertRaises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'], freq='D')
self.assertRaises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D', name='TEST')
self.assertEqual(idx.name, 'TEST')
# GH10025
idx2 = TimedeltaIndex(idx, name='something else')
self.assertEqual(idx2.name, 'something else')
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
self.assert_numpy_array_equal(result, exp)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
if _np_version_under1p8:
# cannot test the array case because np.datetime64('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
self.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
self.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
self.assert_numpy_array_equal(result, expected)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = np.array([f(x) for x in rng], dtype=np.int64)
self.assert_numpy_array_equal(result, exp)
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
tm.assertIsInstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
self.assertFalse(idx.equals(list(idx)))
non_td = Index(list('abc'))
self.assertFalse(idx.equals(list(non_td)))
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
self.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
self.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
self.assert_index_equal(result, ordered)
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
self.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
self.assertEqual(len(result), 0)
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
self.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
self.assertTrue(ordered.is_monotonic)
ordered = idx.sort_values(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.sort_values(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer,
np.array([1, 2, 0]),
check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer,
np.array([0, 2, 1]),
check_dtype=False)
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
self.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
# preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
# preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_take(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
self.assert_index_equal(taken, expected)
tm.assertIsInstance(taken, TimedeltaIndex)
self.assertIsNone(taken.freq)
self.assertEqual(taken.name, expected.name)
def test_take_fill_value(self):
# GH 12631
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
tm.assert_index_equal(cols, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assert_index_equal(idx, idx3)
class TestSlicing(tm.TestCase):
def test_partial_slice(self):
rng = timedelta_range('1 day 10:11:12', freq='h', periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['5 day':'6 day']
expected = s.iloc[86:134]
assert_series_equal(result, expected)
result = s['5 day':]
expected = s.iloc[86:]
assert_series_equal(result, expected)
result = s[:'6 day']
expected = s.iloc[:134]
assert_series_equal(result, expected)
result = s['6 days, 23:11:12']
self.assertEqual(result, s.iloc[133])
self.assertRaises(KeyError, s.__getitem__, '50 days')
def test_partial_slice_high_reso(self):
# higher reso
rng = timedelta_range('1 day 10:11:12', freq='us', periods=2000)
s = Series(np.arange(len(rng)), index=rng)
result = s['1 day 10:11:12':]
expected = s.iloc[0:]
assert_series_equal(result, expected)
result = s['1 day 10:11:12.001':]
expected = s.iloc[1000:]
assert_series_equal(result, expected)
result = s['1 days, 10:11:12.001001']
self.assertEqual(result, s.iloc[1001])
def test_slice_with_negative_step(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])
assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])
assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-
1], SLC[15:6:-1])
assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1],
SLC[15:6:-1])
assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
hippo91/XVOF | xfv/post_processing/free_surface_velocity.py | 1 | 4486 | # -*- coding: utf-8 -*-
"""
Plot the free surface velocity, optionally together with experimental data
"""
import argparse
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from xfv.post_processing.tools.hdf5_postprocessing_tools import get_field_evolution_in_time_for_item
def run():
"""
Run post processing program
"""
# ----------------------------------------------------------
# Read instructions
# ----------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", help="Increase program verbosity")
parser.add_argument("-case", action='append', nargs='+',
help="the path to the output repository from /xfv/tests/")
parser.add_argument("-experimental_data", help="the path to experimental data to plot")
parser.add_argument("--output_filename", default="all_fields.hdf5",
help="the name of the output hdf5 band (default = all_fields.hdf5)")
parser.add_argument("--shift_t0", action="store_true",
help="Shift time origin to put t0 when the velocity signal arrives on "
"the free surface velocity")
parser.add_argument("--write_data", default="free_surface_velocity.dat",
help="Name of the output file")
args = parser.parse_args()
if args.case is None:
raise ValueError("At least one case is needed. Use -case to specify cases to plot")
if args.verbose:
print("Cases : ")
print(args.case)
if args.experimental_data is not None:
print("Experimental data : " + args.experimental_data)
print("~~~~~~~~~~~~~")
exp_data = args.experimental_data
# ----------------------------------------------------------
# Prepare figure
# ----------------------------------------------------------
plt.figure(1)
plt.title("Evolution of the free surface velocity", fontweight='bold', fontsize=18)
plt.xlabel("Time [mus]", fontsize=16)
plt.ylabel("Free surface velocity [m/s]", fontsize=16)
# ----------------------------------------------------------
# Plot free surface velocity for each case
# ----------------------------------------------------------
for case in args.case[0]:
if args.verbose:
print("Case is : " + case)
path_to_db = pathlib.Path.cwd().joinpath("..", "tests", case, args.output_filename)
if args.verbose:
print("Path to database : {:}".format(path_to_db))
print("Read VelocityField in database... ")
# Read the database:
# the free surface is the last node => index -1 in the NumPy array
item_history = get_field_evolution_in_time_for_item(path_to_db, -1, "ClassicalNodeVelocity")
# NodeVelocity => calls the true_field_extraction with NodeCoordinates reading
# ClassicalNodeVelocity => calls field_extraction to get the field without NodeCoordinates extraction
if args.verbose:
print("Done !")
print("~~~~~~~~~~~~~")
# Plot velocity with t0 = detection of free surface movement:
time = item_history[:, 0]
velocity = item_history[:, 1]
time_0 = 0.
if args.shift_t0:
time_0 = time[velocity > 1][0]  # first time with a non-zero velocity
if args.verbose:
print("New t0 is : " + str(time_0))
plt.plot((time - time_0) * 1.e+6, velocity, label=case)
if args.write_data:
data_path = case + args.write_data
with open(data_path, "w") as file_object:
for x_data, y_data in zip(time, velocity):
file_object.write("{:20.18g}\t{:20.18g}\n".format(x_data, y_data))
print("Data written to {:s}".format(data_path))
# ----------------------------------------------------------
# Plot experimental data
# ----------------------------------------------------------
if exp_data is not None:
experimental_velocity = np.loadtxt(exp_data)
plt.plot(experimental_velocity[:, 0], experimental_velocity[:, 1], "--",
color="black", label="Experiment")
# ----------------------------------------------------------
# Show figure
# ----------------------------------------------------------
plt.legend(loc="best")
plt.show()
if __name__ == "__main__":
run()
| gpl-3.0 |
ExeClim/Isca | src/extra/python/scripts/gfdl_grid_files/grid_file_generator.py | 4 | 1170 | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# specify resolution
t_res = 42
# read in the grid from the appropriate file
resolution_file = Dataset('t'+str(t_res)+'_atmos_daily.nc', 'r', format='NETCDF3_CLASSIC')
lons = resolution_file.variables['lon'][:]
lats = resolution_file.variables['lat'][:]
lonsb = resolution_file.variables['lonb'][:]
latsb = resolution_file.variables['latb'][:]
nlon=lons.shape[0]
nlat=lats.shape[0]
nlonb=lonsb.shape[0]
nlatb=latsb.shape[0]
output_file = Dataset('t'+str(t_res)+'.nc', 'w', format='NETCDF3_CLASSIC')
lat = output_file.createDimension('lat', nlat)
lon = output_file.createDimension('lon', nlon)
latb = output_file.createDimension('latb', nlatb)
lonb = output_file.createDimension('lonb', nlonb)
latitudes = output_file.createVariable('lat','f4',('lat',))
longitudes = output_file.createVariable('lon','f4',('lon',))
latitudesb = output_file.createVariable('latb','f4',('latb',))
longitudesb = output_file.createVariable('lonb','f4',('lonb',))
latitudes[:] = lats
longitudes[:] = lons
latitudesb[:] = latsb
longitudesb[:] = lonsb
output_file.close()
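# Optional sanity check (a minimal sketch; assumes the 't<res>.nc' file written
# above exists in the working directory): re-open it and verify the axis sizes.
check_file = Dataset('t' + str(t_res) + '.nc', 'r', format='NETCDF3_CLASSIC')
assert check_file.variables['lat'][:].shape[0] == nlat
assert check_file.variables['lon'][:].shape[0] == nlon
check_file.close()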
| gpl-3.0 |
bundgus/python-playground | matplotlib-playground/examples/animation/bayes_update.py | 1 | 1468 | # update a distribution based on new data.
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from matplotlib.animation import FuncAnimation
class UpdateDist(object):
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
# Set up plot parameters
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 15)
self.ax.grid(True)
# This vertical line represents the theoretical value, to
# which the plotted distribution should converge.
self.ax.axvline(prob, linestyle='--', color='black')
def init(self):
self.success = 0
self.line.set_data([], [])
return self.line,
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i == 0:
return self.init()
# Decide success by comparing a uniform random draw against the threshold
if np.random.rand(1,) < self.prob:
self.success += 1
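# Beta-Bernoulli conjugacy: starting from a flat Beta(1, 1) prior,
# observing `success` successes in `i` trials gives a
# Beta(success + 1, i - success + 1) posterior over the probability.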
y = ss.beta.pdf(self.x, self.success + 1, (i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
fig, ax = plt.subplots()
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, frames=np.arange(100), init_func=ud.init,
interval=100, blit=True)
plt.show()
| mit |
0x0all/scikit-learn | sklearn/__init__.py | 12 | 2540 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
__version__ = '0.16-git'
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search', 'hmm',
'isotonic', 'kernel_approximation', 'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs
"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
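# Example (hypothetical invocation): pin the seed for a reproducible test run,
# e.g. SKLEARN_SEED=42 nosetests sklearn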
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
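# Prefer a connected_components implementation exposed by scipy.sparse when
# available; otherwise fall back to the copy bundled in .sparsetools.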
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
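Examples
--------
A minimal sketch on an undirected 3-node path graph (illustrative only):
>>> import numpy as np
>>> graph = np.array([[0, 1, 0],
...                   [1, 0, 1],
...                   [0, 1, 0]])
>>> graph_laplacian(graph)
array([[ 1, -1,  0],
       [-1,  2, -1],
       [ 0, -1,  1]])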
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
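# A minimal sketch of how the pieces above fit together: build the
# unnormalized Laplacian of a 3-node path graph and verify that every row
# sums to zero, the defining property of L = D - A. `_demo_graph_laplacian`
# is a hypothetical helper name, assuming `np` and `graph_laplacian` as
# defined in this module.
def _demo_graph_laplacian():
    adjacency = np.array([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])
    lap = graph_laplacian(adjacency, normed=False)
    # The diagonal holds node degrees; off-diagonal entries are -A[i, j].
    assert np.allclose(lap.sum(axis=1), 0)
    return lap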
| bsd-3-clause |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/numpy/lib/function_base.py | 30 | 124613 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
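# A minimal sketch of the counting trick used in the block loop above:
# sorting a block and calling searchsorted on the bin edges yields
# cumulative counts whose first difference is the per-bin count.
# `_demo_bin_counts` is a hypothetical helper name, assuming only `np`.
def _demo_bin_counts():
    a = np.array([0.1, 0.4, 0.4, 0.7, 0.95])
    edges = np.array([0.0, 0.5, 1.0])
    sa = np.sort(a)
    cum = np.r_[sa.searchsorted(edges[:-1], 'left'),
                sa.searchsorted(edges[-1], 'right')]
    counts = np.diff(cum)
    assert counts.tolist() == [3, 2]  # three values below 0.5, two above
    return counts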
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
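# A minimal sketch of histogramdd's flattening step: per-axis bin numbers
# are combined into a single mixed-radix index so one bincount can count
# every D-dimensional bin at once. `_demo_flat_index` is a hypothetical
# helper name, assuming only `np`.
def _demo_flat_index():
    nbin = np.array([3, 3])          # bins per axis, outlier bins included
    ix = np.array([1, 2])            # axis-0 bin number of each sample
    iy = np.array([2, 1])            # axis-1 bin number of each sample
    flat = ix * nbin[1] + iy         # row-major mixed-radix flattening
    counts = np.bincount(flat, minlength=nbin.prod()).reshape(nbin)
    assert counts[1, 2] == 1 and counts[2, 1] == 1
    return counts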
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
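# A minimal sketch of the weight-broadcasting step above: a 1-D weights
# vector is promoted with ndmin and swapaxes so that it lines up with
# `axis` and broadcasts against `a`. `_demo_weight_broadcast` is a
# hypothetical helper name, assuming only `np`.
def _demo_weight_broadcast():
    a = np.arange(6.0).reshape(3, 2)
    wgt = np.array([1.0, 2.0, 3.0])               # one weight per row
    wgt2 = np.array(wgt, ndmin=a.ndim).swapaxes(-1, 0)
    assert wgt2.shape == (3, 1)                   # broadcasts with (3, 2)
    avg = np.multiply(a, wgt2).sum(axis=0) / wgt2.sum(axis=0)
    assert np.allclose(avg, np.average(a, axis=0, weights=wgt))
    return avg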
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
        ``piecewise(..., ..., alpha=1)``, then each function is called as
        ``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
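# A minimal sketch of piecewise's implicit "otherwise" branch: one more
# function than conditions makes the extra callable the default wherever
# no condition holds. `_demo_piecewise_default` is a hypothetical helper
# name, assuming `np` and `piecewise` as defined above.
def _demo_piecewise_default():
    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    y = piecewise(x, [x < 0], [lambda v: -v, lambda v: v ** 2])
    assert np.allclose(y, [2.0, 1.0, 0.0, 1.0, 4.0])
    return y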
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
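# A minimal sketch of the precedence rule implemented by the reversed
# copyto loop above: where two conditions overlap, the first one listed
# wins because it is written onto the result last. `_demo_select_precedence`
# is a hypothetical helper name, assuming `np` and `select` as defined above.
def _demo_select_precedence():
    x = np.arange(6)
    out = select([x < 4, x > 1], [x, x ** 2], default=-1)
    # Positions 2 and 3 satisfy both conditions; the first choice, x, wins.
    assert out.tolist() == [0, 1, 2, 3, 16, 25]
    return out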
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
    as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
    # endpoints. This preserves second-order accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
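# A minimal sketch of the slice-object technique used by gradient above:
# mutating one entry of a list of slice(None) objects lets the same N-d
# expression realize the 1-D stencil out[1:-1] = (y[2:] - y[:-2])/2 along
# any axis. `_demo_axis_slices` is a hypothetical helper name, assuming
# only `np`.
def _demo_axis_slices():
    y = np.arange(12.0).reshape(3, 4) ** 2
    axis = 1
    slice1 = [slice(None)] * y.ndim
    slice2 = [slice(None)] * y.ndim
    slice3 = [slice(None)] * y.ndim
    slice1[axis] = slice(1, -1)
    slice2[axis] = slice(2, None)
    slice3[axis] = slice(None, -2)
    out = np.empty_like(y)
    out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)]) / 2.0
    assert np.allclose(out[:, 1:-1], (y[:, 2:] - y[:, :-2]) / 2.0)
    return out[tuple(slice1)]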
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
        The n-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
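# A minimal sketch of the recursion above: an order-n difference is just a
# first difference applied n times. `_demo_diff_recursion` is a
# hypothetical helper name, assuming `np` and `diff` as defined above.
def _demo_diff_recursion():
    x = np.array([1, 2, 4, 7, 0])
    assert np.array_equal(diff(x, n=2), diff(diff(x)))
    return diff(x, n=2)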
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
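# A minimal sketch of unwrap's core step: map each first difference into
# (-pi, pi] and accumulate the corrections. The `discont` masking is
# omitted here; for this input it changes nothing, since a small step
# already yields a zero correction. `_demo_phase_correction` is a
# hypothetical helper name, assuming `np` and `unwrap` as defined above.
def _demo_phase_correction():
    p = np.array([0.0, 0.5, 6.5])     # the 6.0 jump exceeds pi
    dd = np.diff(p)
    ddmod = np.mod(dd + np.pi, 2 * np.pi) - np.pi
    corrected = p.copy()
    corrected[1:] += (ddmod - dd).cumsum()
    assert np.allclose(corrected, unwrap(p))
    return corrected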
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
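# A minimal sketch of what _vectorize_call does internally: wrap the
# Python function with frompyfunc (whose outputs are object arrays) and
# cast back to the dtype inferred from the first output element.
# `_demo_frompyfunc_cast` is a hypothetical helper name, assuming only `np`.
def _demo_frompyfunc_cast():
    uf = np.frompyfunc(lambda a, b: a - b if a > b else a + b, 2, 1)
    raw = uf(np.array([1, 2, 3, 4]), 2)               # dtype=object
    otype = np.asarray(raw.flat[0]).dtype.char        # infer output dtype
    cast = np.array(raw, dtype=otype)
    assert cast.tolist() == [3, 4, 1, 2]
    return cast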
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X -= X.mean(axis=1-axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
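# A minimal sketch of the ddof resolution above: bias=0 maps to ddof=1
# (the unbiased N-1 normalization), bias=1 maps to ddof=0, and an explicit
# ddof overrides both. `_demo_cov_ddof` is a hypothetical helper name,
# assuming `np` and `cov` as defined above.
def _demo_cov_ddof():
    x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    unbiased = cov(x)            # ddof resolves to 1, divide by N - 1
    biased = cov(x, bias=1)      # ddof resolves to 0, divide by N
    assert np.allclose(unbiased * (2.0 / 3.0), biased)   # (N-1)/N, N = 3
    return unbiased, biased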
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
        shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
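# A minimal sketch of the normalization performed just above: divide the
# covariance matrix elementwise by sqrt(C_ii * C_jj) to obtain correlation
# coefficients. `_demo_corr_normalize` is a hypothetical helper name,
# assuming `np` and `cov` as defined above.
def _demo_corr_normalize():
    x = np.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]])   # perfectly correlated
    c = cov(x)
    d = np.diag(c)
    p = c / np.sqrt(np.multiply.outer(d, d))
    assert np.allclose(p, np.ones((2, 2)))
    return p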
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
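# Illustrative check (not part of the original source): the where(...) above
# selects the rising ramp 2n/(M-1) for the first half of the window and the
# mirrored falling ramp for the second half, with exact zeros at both ends.
#
# >>> import numpy as np
# >>> np.bartlett(5)
# array([ 0. ,  0.5,  1. ,  0.5,  0. ])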
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey,
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
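# Illustrative comparison (not part of the original source): unlike the Hann
# window, the Hamming coefficients (0.54, 0.46) do not cancel at the ends,
# so the window starts and ends at 0.54 - 0.46 = 0.08 instead of 0.
#
# >>> import numpy as np
# >>> bool(np.allclose(np.hamming(11)[[0, -1]], 0.08))
# True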
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
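# _chbevl evaluates a Chebyshev series by Clenshaw's recurrence,
# b0 = x*b1 - b2 + c_i, returning 0.5*(b0 - b2); note that `vals` must hold
# at least two coefficients, otherwise b2 is never bound. Illustrative
# check (not part of the original source) with the series c = [0, 2]:
#
# >>> _chbevl(0.3, [0.0, 2.0])
# 1.0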
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
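# Illustrative cross-check (not part of the original source): when SciPy is
# available (an optional assumption), the Chebyshev approximation above
# agrees with scipy.special.i0 on both sides of the domain split at x = 8.
#
# >>> import numpy as np
# >>> import scipy.special
# >>> x = np.array([0.5, 7.9, 8.1, 25.0])
# >>> bool(np.allclose(i0(x), scipy.special.i0(x)))
# True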
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
    Sequence, or Slepian window, which maximizes the energy in the main
    lobe of the window relative to the total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
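# Illustrative check (not part of the original source): for beta = 0 the
# ratio above is identically i0(0)/i0(0) = 1, reproducing the rectangular
# window from the beta table in the docstring.
#
# >>> import numpy as np
# >>> bool(np.allclose(np.kaiser(12, 0.0), np.ones(12)))
# True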
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
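# Illustrative note (not part of the original source): substituting 1.0e-20
# for zeros above avoids a 0/0 at x = 0; for such a tiny argument sin(y)/y
# evaluates to 1.0 to machine precision, matching the documented limit.
#
# >>> import numpy as np
# >>> float(np.sinc(0.0))
# 1.0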
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
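# Illustrative check (not part of the original source): msort is a thin
# wrapper around ndarray.sort along the first axis, so it is equivalent to
# np.sort(a, axis=0) while preserving subclasses via subok=True.
#
# >>> import numpy as np
# >>> a = np.array([[3, 1], [0, 2]])
# >>> bool(np.array_equal(np.msort(a), np.sort(a, axis=0)))
# True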
def _ureduce(a, func, **kwargs):
"""
Internal Function.
    Call `func` with `a` as the first argument, swapping the axes to emulate
    an extended (tuple) axis on functions that don't support it natively.
    Returns the result and a.shape with the axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as the first argument, followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
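# Illustrative sketch (not part of the original source): for a tuple axis,
# _ureduce swaps the kept axes to the front, merges the reduced axes into a
# single trailing axis, and returns the keepdims-style shape alongside.
#
# >>> import numpy as np
# >>> a = np.arange(24).reshape(2, 3, 4)
# >>> r, keepdim = _ureduce(a, func=np.median, axis=(0, 2))
# >>> r.shape, keepdim
# ((3,), [1, 3, 1])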
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile, as we have to
# call mean to not break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
        # Do not allow 0-d arrays because the following code fails for scalars
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
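# Illustrative sketch (not part of the original source): with the default
# 'linear' interpolation the fractional index q*(Nx - 1) is split between
# the two neighbouring order statistics, weighted by its fractional part.
#
# >>> import numpy as np
# >>> # index 0.25*(4-1) = 0.75 -> 0.25*1.0 + 0.75*2.0
# >>> float(np.percentile([1.0, 2.0, 3.0, 4.0], 25))
# 1.75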
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule: y-axis locations of points
    are taken from the `y` array; by default, x-axis distances between
    points are 1.0, but they can instead be provided via the `x` array or
    the `dx` scalar. The return value equals the combined area under
    the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
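# Illustrative check (not part of the original source): the vectorized
# expression above is the usual sum of trapezoid areas d*(y[1:] + y[:-1])/2
# for possibly non-uniform spacing d = diff(x).
#
# >>> import numpy as np
# >>> y = np.array([1.0, 3.0, 2.0]); x = np.array([0.0, 1.0, 3.0])
# >>> float(np.trapz(y, x))
# 7.0
# >>> float(np.sum(np.diff(x) * (y[1:] + y[:-1]) / 2.0))
# 7.0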
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
    If doc is a string, add it to obj as a docstring.
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
    routine never raises an error, the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
    For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
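# Illustrative note (not part of the original source): with sparse=True the
# outputs keep singleton dimensions and rely on broadcasting, so the dense
# grid is recovered simply by broadcasting the sparse arrays together.
#
# >>> import numpy as np
# >>> xs, ys = np.meshgrid(np.arange(3), np.arange(2), sparse=True)
# >>> xs.shape, ys.shape
# ((1, 3), (2, 1))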
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
        # broadcasting is very different here, since a[:,0,:] = ... behaves
        # very differently from a[:,[0],:] = ...! This changes values so that
        # it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| mit |
hsuantien/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
giacomov/3ML | threeML/minimizer/minimization.py | 1 | 40378 | from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import collections
import math
import numpy as np
import pandas as pd
import scipy.optimize
from threeML.io.progress_bar import progress_bar
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.utils.differentiation import get_hessian, ParameterOnBoundary
# Set the warnings to be issued always for this module
custom_warnings.simplefilter("always", RuntimeWarning)
# Special constants
FIT_FAILED = 1e12
# Define a bunch of custom exceptions relevant for what is being accomplished here
class CannotComputeCovariance(RuntimeWarning):
pass
class CannotComputeErrors(RuntimeWarning):
pass
class ParameterIsNotFree(Exception):
pass
class FitFailed(Exception):
pass
class MinimizerNotAvailable(Exception):
pass
class BetterMinimumDuringProfiling(RuntimeWarning):
pass
# This will contain the available minimizers
_minimizers = {}
def get_minimizer(minimizer_type):
"""
Return the requested minimizer *class* (not instance)
:param minimizer_type: MINUIT, ROOT, PYOPT...
:return: the class (i.e., the type) for the requested minimizer
"""
try:
return _minimizers[minimizer_type.upper()]
except KeyError:
raise MinimizerNotAvailable(
"Minimizer %s is not available on your system" % minimizer_type
)
class FunctionWrapper(object):
def __init__(self, function, all_parameters, fixed_parameters):
"""
:param function:
:param all_parameters:
:param fixed_parameters: list of fixed parameters
"""
self._function = function
self._all_parameters = all_parameters
self._fixed_parameters_values = np.zeros(len(fixed_parameters))
self._fixed_parameters_names = fixed_parameters
self._indexes_of_fixed_par = np.zeros(len(self._all_parameters), bool)
for i, parameter_name in enumerate(self._fixed_parameters_names):
this_index = list(self._all_parameters.keys()).index(parameter_name)
self._indexes_of_fixed_par[this_index] = True
self._all_values = np.zeros(len(self._all_parameters))
def set_fixed_values(self, new_fixed_values):
# Note that this will receive the fixed values in internal reference (after the transformations, if any)
        # We use [:] so there is an implicit check on the right size of new_fixed_values
self._fixed_parameters_values[:] = new_fixed_values
def __call__(self, *trial_values):
# Note that this function will receive the trial values in internal reference (after the transformations,
# if any)
self._all_values[self._indexes_of_fixed_par] = self._fixed_parameters_values
self._all_values[~self._indexes_of_fixed_par] = trial_values
return self._function(*self._all_values)
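# Illustrative sketch (not part of the original source; the names below are
# made up): FunctionWrapper splices the frozen values back into the full
# argument vector, so the wrapped function always sees the complete
# parameter set even though the optimizer only varies the free ones.
#
# import collections
# all_pars = collections.OrderedDict([("a", None), ("b", None), ("c", None)])
# w = FunctionWrapper(lambda a, b, c: a + b + c, all_pars, ["b"])
# w.set_fixed_values([10.0])
# assert w(1.0, 2.0) == 13.0  # trial values fill "a" and "c"; "b" stays 10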
class ProfileLikelihood(object):
def __init__(self, minimizer_instance, fixed_parameters):
self._fixed_parameters = fixed_parameters
assert (
len(self._fixed_parameters) <= 2
), "Can handle only one or two fixed parameters"
# Get some info from the original minimizer
self._function = minimizer_instance.function
# Note that here we have to use the original parameters (not the internal parameters)
self._all_parameters = minimizer_instance.parameters
# Create a copy of the dictionary of parameters
free_parameters = collections.OrderedDict(self._all_parameters)
# Remove the fixed ones
for parameter_name in fixed_parameters:
free_parameters.pop(parameter_name)
# Now compute how many free parameters we have
self._n_free_parameters = len(free_parameters)
if self._n_free_parameters > 0:
self._wrapper = FunctionWrapper(
self._function, self._all_parameters, self._fixed_parameters
)
# Create a copy of the optimizer with the new parameters (i.e., one or two
# parameters fixed to their current values)
self._optimizer = type(minimizer_instance)(
self._wrapper, free_parameters, verbosity=0
)
if minimizer_instance.algorithm_name is not None:
self._optimizer.set_algorithm(minimizer_instance.algorithm_name)
else:
# Special case when there are no free parameters after fixing the requested ones
# There is no profiling necessary here
self._wrapper = None
self._optimizer = None
def _transform_steps(self, parameter_name, steps):
"""
If the parameter has a transformation, use it for the steps and return the transformed steps
:return: transformed steps
"""
if self._all_parameters[parameter_name].has_transformation():
new_steps = self._all_parameters[parameter_name].transformation.forward(
steps
)
return new_steps
else:
# Nothing to do
return steps
def step(self, steps1, steps2=None):
if steps2 is not None:
assert (
len(self._fixed_parameters) == 2
), "Cannot step in 2d if you fix only one parameter"
# Find out if the user is giving flipped steps (i.e. param_1 is after param_2 in the
# parameters dictionary)
param_1_name = self._fixed_parameters[0]
param_1_idx = list(self._all_parameters.keys()).index(param_1_name)
param_2_name = self._fixed_parameters[1]
param_2_idx = list(self._all_parameters.keys()).index(param_2_name)
# Fix steps if needed
steps1 = self._transform_steps(param_1_name, steps1)
if steps2 is not None:
steps2 = self._transform_steps(param_2_name, steps2)
if param_1_idx > param_2_idx:
# Switch steps
swap = steps1
steps1 = steps2
steps2 = swap
results = self._step2d(steps1, steps2).T
else:
results = self._step2d(steps1, steps2)
return results
else:
assert (
len(self._fixed_parameters) == 1
), "You cannot step in 1d if you fix 2 parameters"
param_1_name = self._fixed_parameters[0]
# Fix steps if needed.
steps1 = self._transform_steps(param_1_name, steps1)
return self._step1d(steps1)
def __call__(self, values):
self._wrapper.set_fixed_values(values)
_, this_log_like = self._optimizer.minimize(compute_covar=False)
return this_log_like
def _step1d(self, steps1):
log_likes = np.zeros_like(steps1)
with progress_bar(len(steps1), title="Profiling likelihood") as p:
for i, step in enumerate(steps1):
if self._n_free_parameters > 0:
# Profile out the free parameters
self._wrapper.set_fixed_values(step)
_, this_log_like = self._optimizer.minimize(compute_covar=False)
else:
# No free parameters, just compute the likelihood
this_log_like = self._function(step)
log_likes[i] = this_log_like
p.increase()
return log_likes
def _step2d(self, steps1, steps2):
log_likes = np.zeros((len(steps1), len(steps2)))
with progress_bar(len(steps1) * len(steps2), title="Profiling likelihood") as p:
for i, step1 in enumerate(steps1):
for j, step2 in enumerate(steps2):
if self._n_free_parameters > 0:
# Profile out the free parameters
self._wrapper.set_fixed_values([step1, step2])
try:
_, this_log_like = self._optimizer.minimize(
compute_covar=False
)
except FitFailed:
# If the user is stepping too far it might be that the fit fails. It is usually not a
# problem
this_log_like = np.nan
else:
# No free parameters, just compute the likelihood
this_log_like = self._function(step1, step2)
log_likes[i, j] = this_log_like
p.increase()
return log_likes
# These classes are used directly by the user to have better control over the minimizers.
# They are actually factories
class _Minimization(object):
def __init__(self, minimizer_type):
self._minimizer_type = get_minimizer(minimizer_type=minimizer_type)
self._algorithm = None
self._setup_dict = {}
def setup(self, **setup_dict):
valid_setup_keys = self._minimizer_type.valid_setup_keys
# Check that the setup has been specified well
for key in list(setup_dict.keys()):
assert key in valid_setup_keys, (
"%s is not a valid setup parameter for this minimizer" % key
)
self._setup_dict = setup_dict
def set_algorithm(self, algorithm):
# Note that algorithm might be None
self._algorithm = algorithm
class LocalMinimization(_Minimization):
def __init__(self, minimizer_type):
super(LocalMinimization, self).__init__(minimizer_type)
assert issubclass(self._minimizer_type, LocalMinimizer), (
"Minimizer %s is not a local minimizer" % minimizer_type
)
def get_instance(self, *args, **kwargs):
instance = self._minimizer_type(*args, **kwargs)
if self._algorithm is not None:
instance.set_algorithm(self._algorithm)
# Set up the minimizer
instance._setup(self._setup_dict)
return instance
class GlobalMinimization(_Minimization):
def __init__(self, minimizer_type):
super(GlobalMinimization, self).__init__(minimizer_type)
assert issubclass(self._minimizer_type, GlobalMinimizer), (
"Minimizer %s is not a local minimizer" % minimizer_type
)
self._2nd_minimization = None
def setup(self, **setup_dict):
assert "second_minimization" in setup_dict, (
"You have to provide a secondary minimizer during setup, "
"using the second_minimization keyword"
)
self._2nd_minimization = setup_dict["second_minimization"]
super(GlobalMinimization, self).setup(**setup_dict)
def get_second_minimization_instance(self, *args, **kwargs):
return self._2nd_minimization.get_instance(*args, **kwargs)
def get_instance(self, *args, **kwargs):
instance = self._minimizer_type(*args, **kwargs)
if self._algorithm is not None:
instance.set_algorithm(self._algorithm)
# Set up the minimizer
instance._setup(self._setup_dict)
return instance
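# Hedged usage sketch for these factory classes ("MINUIT" and "GRID" are the
# registry keys filled in at the bottom of this module; the accepted setup
# keys depend on each minimizer's valid_setup_keys):
#
#   local = LocalMinimization("MINUIT")
#   glob = GlobalMinimization("GRID")
#   glob.setup(second_minimization=local)
#   minimizer = glob.get_instance(function, parameters)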
class Minimizer(object):
def __init__(self, function, parameters, verbosity=1, setup_dict=None):
"""
:param function: function to be minimized
:param parameters: ordered dictionary of the FREE parameters in the fit. The order must be the same as
in the calling sequence of the function to be minimized.
:param verbosity: control the verbosity of the output
:param setup_dict: optional dictionary of setup parameters for the specific minimizer
:return:
"""
self._function = function
self._external_parameters = parameters
self._internal_parameters = self._update_internal_parameter_dictionary()
self._Npar = len(list(self.parameters.keys()))
self._verbosity = verbosity
self._setup(setup_dict)
self._fit_results = None
self._covariance_matrix = None
self._correlation_matrix = None
self._algorithm_name = None
self._m_log_like_minimum = None
self._optimizer_type = type(self).__name__
def _update_internal_parameter_dictionary(self):
"""
Returns a dictionary parameter_name -> (current value, delta, minimum, maximum) in the internal frame
(if the parameter has a transformation set).
This should be used by the implementation of the minimizers to get the parameters to optimize.
:return: dictionary
"""
# Prepare the dictionary for the parameters which will be used by iminuit
internal_parameter_dictionary = collections.OrderedDict()
# NOTE: we use the internal_ versions of value, min_value and max_value because they don't have
# units, and they are transformed to make the fit easier (for example in log scale)
# NOTE as well that as in the entire class here, the .parameters dictionary only contains free parameters,
# as only free parameters are passed to the constructor of the minimizer
for k, par in self.parameters.items():
current_name = par.path
current_value = par._get_internal_value()
current_delta = par._get_internal_delta()
current_min = par._get_internal_min_value()
current_max = par._get_internal_max_value()
# Now fix sensible values for parameters deltas
if current_min is None and current_max is None:
# No boundaries, use 2% of value as initial delta
if abs(current_delta) < abs(current_value) * 0.02 or not np.isfinite(
current_delta
):
current_delta = abs(current_value) * 0.02
elif current_min is not None:
if current_max is not None:
# Bounded in both directions. Use 2% of the value
current_delta = abs(current_value) * 0.02
# Make sure we do not violate the boundaries
current_delta = min(
current_delta,
abs(current_value - current_delta) / 10.0,
abs(current_value + current_delta) / 10.0,
)
else:
# Bounded only from below. Make sure we are not at the boundary
if np.isclose(
current_value, current_min, old_div(abs(current_value), 20)
):
custom_warnings.warn(
"The current value of parameter %s is very close to "
"its lower bound when starting the fit. Fixing it"
% par.name
)
current_value = current_value + 0.1 * abs(current_value)
current_delta = 0.05 * abs(current_value)
else:
current_delta = min(
current_delta, abs(current_value - current_min) / 10.0
)
else:
if current_max is not None:
# Bounded only from above. Make sure we are not at the boundary
if np.isclose(
current_value, current_max, old_div(abs(current_value), 20)
):
custom_warnings.warn(
"The current value of parameter %s is very close to "
"its upper bound when starting the fit. Fixing it"
% par.name
)
current_value = current_value - 0.04 * abs(current_value)
current_delta = 0.02 * abs(current_value)
else:
current_delta = min(
current_delta, abs(current_max - current_value) / 2.0
)
# Sometimes, if the value was 0, the delta could be 0 as well which would crash
# certain algorithms
if current_value == 0:
current_delta = 0.1
internal_parameter_dictionary[current_name] = (
current_value,
current_delta,
current_min,
current_max,
)
return internal_parameter_dictionary
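# Worked example of the delta heuristic above (illustrative numbers only):
# an unbounded parameter with value 10.0 and delta 1e-4 gets its delta raised
# to 0.02 * 10.0 = 0.2, while a parameter sitting exactly at 0 always ends up
# with delta = 0.1, regardless of bounds.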
@property
def function(self):
return self._function
@property
def parameters(self):
return self._external_parameters
@property
def Npar(self):
return self._Npar
@property
def verbosity(self):
return self._verbosity
def _setup(self, setup_dict):
raise NotImplementedError("You have to implement this.")
@property
def algorithm_name(self):
return self._algorithm_name
def minimize(self, compute_covar=True):
"""
Minimize the objective function. This calls _minimize, which is implemented by each subclass.
:param compute_covar:
:return: best fit values (in external reference) and minimum of the objective function
"""
# Gather the best fit values from the minimizer and the covariance matrix (if provided)
internal_best_fit_values, function_minimum = self._minimize()
# Check that the best fit values are all finite
if not np.all(np.isfinite(internal_best_fit_values)):
raise FitFailed(
"_Minimization apparently succeeded, "
"but best fit values are not all finite: %s"
% (internal_best_fit_values)
)
# Now set the internal values of the parameters to their best fit values and collect the
# values in external reference
external_best_fit_values = []
for i, parameter in enumerate(self.parameters.values()):
parameter._set_internal_value(internal_best_fit_values[i])
external_best_fit_values.append(parameter.value)
# Now compute the covariance matrix, if requested
if compute_covar:
covariance = self._compute_covariance_matrix(internal_best_fit_values)
else:
covariance = None
# Finally store everything
self._store_fit_results(internal_best_fit_values, function_minimum, covariance)
return external_best_fit_values, function_minimum
def _minimize(self):
# This should return the list of best fit parameters and the minimum of the function
raise NotImplementedError(
"This is the method of the base class. Must be implemented by the actual minimizer"
)
def set_algorithm(self, algorithm):
raise NotImplementedError(
"Must be implemented by the actual minimizer if it provides more than one algorithm"
)
def _store_fit_results(
self, best_fit_values, m_log_like_minimum, covariance_matrix=None
):
self._m_log_like_minimum = m_log_like_minimum
# Create a pandas DataFrame with the fit results
values = collections.OrderedDict()
errors = collections.OrderedDict()
# to become compatible with python3
keys_list = list(self.parameters.keys())
parameters_list = list(self.parameters.values())
for i in range(self.Npar):
name = keys_list[i]
value = best_fit_values[i]
# Set the parameter to the best fit value (sometimes the optimization happens in a different
# thread/node, so we need to make sure that the parameter has the best fit value)
parameters_list[i]._set_internal_value(value)
if covariance_matrix is not None:
element = covariance_matrix[i, i]
if element > 0:
error = math.sqrt(covariance_matrix[i, i])
else:
custom_warnings.warn(
"Negative element on diagonal of covariance matrix",
CannotComputeErrors,
)
error = np.nan
else:
error = np.nan
values[name] = value
errors[name] = error
data = collections.OrderedDict()
data["value"] = pd.Series(values)
data["error"] = pd.Series(errors)
self._fit_results = pd.DataFrame(data)
self._covariance_matrix = covariance_matrix
# Compute correlation matrix (only when a covariance matrix is available;
# np.zeros_like(None) would otherwise produce a meaningless 0-d array)
if covariance_matrix is not None:
self._correlation_matrix = np.zeros_like(self._covariance_matrix)
for i in range(self.Npar):
variance_i = self._covariance_matrix[i, i]
for j in range(self.Npar):
variance_j = self._covariance_matrix[j, j]
if variance_i * variance_j > 0:
self._correlation_matrix[i, j] = old_div(
self._covariance_matrix[i, j],
(math.sqrt(variance_i * variance_j)),
)
else:
# We already issued a warning about this, so let's quietly fail
self._correlation_matrix[i, j] = np.nan
else:
self._correlation_matrix = None
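# Reference for the loop above: the correlation coefficient is
# rho_ij = cov_ij / sqrt(cov_ii * cov_jj); e.g. a covariance matrix
# [[4, 1], [1, 1]] yields rho_01 = 1 / sqrt(4 * 1) = 0.5.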
@property
def fit_results(self):
return self._fit_results
@property
def covariance_matrix(self):
return self._covariance_matrix
@property
def correlation_matrix(self):
return self._correlation_matrix
def restore_best_fit(self):
"""
Reset all the parameters to their best fit value (from the last run fit)
:return: none
"""
best_fit_values = self._fit_results["value"].values
for parameter_name, best_fit_value in zip(
list(self.parameters.keys()), best_fit_values
):
self.parameters[parameter_name]._set_internal_value(best_fit_value)
# Regenerate the internal parameter dictionary with the new values
self._internal_parameters = self._update_internal_parameter_dictionary()
def _compute_covariance_matrix(self, best_fit_values):
"""
This function computes the approximate covariance matrix as the inverse of the Hessian matrix,
which is the matrix of second derivatives of the likelihood function with respect to
the parameters.
The sqrt of the diagonal of the result is an accurate estimate of the errors only if the
log-likelihood is parabolic in the neighborhood of the minimum.
Derivatives are computed numerically.
:return: the covariance matrix
"""
minima = [
parameter._get_internal_min_value()
for parameter in list(self.parameters.values())
]
maxima = [
parameter._get_internal_max_value()
for parameter in list(self.parameters.values())
]
# Check whether some of the minima or of the maxima are None. If they are, set them
# to a value 1000 times smaller or larger respectively than the best fit.
# An error of 3 orders of magnitude is not interesting in general, and this is the only
# way to be able to compute a derivative numerically
for i in range(len(minima)):
if minima[i] is None:
minima[i] = best_fit_values[i] / 1000.0
if maxima[i] is None:
maxima[i] = best_fit_values[i] * 1000.0
# Transform them in np.array
minima = np.array(minima)
maxima = np.array(maxima)
try:
hessian_matrix = get_hessian(self.function, best_fit_values, minima, maxima)
except ParameterOnBoundary:
custom_warnings.warn(
"One or more of the parameters are at their boundaries. Cannot compute covariance and"
" errors",
CannotComputeCovariance,
)
n_dim = len(best_fit_values)
return np.zeros((n_dim, n_dim)) * np.nan
# Invert it to get the covariance matrix
try:
covariance_matrix = np.linalg.inv(hessian_matrix)
except np.linalg.LinAlgError:
custom_warnings.warn(
"Cannot invert Hessian matrix, looks like the matrix is singular"
)
n_dim = len(best_fit_values)
return np.zeros((n_dim, n_dim)) * np.nan
# Now check that the covariance matrix is semi-positive definite (it must be unless
# there have been numerical problems, which can happen when some parameter is unconstrained)
# The fastest way is to try and compute the Cholesky decomposition, which
# works only if the matrix is positive definite
try:
_ = np.linalg.cholesky(covariance_matrix)
except np.linalg.LinAlgError:
custom_warnings.warn(
"Covariance matrix is NOT semi-positive definite. Cannot estimate errors. This can "
"happen for many reasons, the most common being one or more unconstrained parameters",
CannotComputeCovariance,
)
return covariance_matrix
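# Quick self-contained check of the Hessian -> covariance relation used above
# (a sketch with assumed toy numbers, independent of this class): for a
# parabolic -log(like) = 0.5 * x**2 / sigma**2 the Hessian is 1 / sigma**2,
# so np.linalg.inv([[1.0 / sigma**2]]) returns [[sigma**2]], the variance.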
def _get_one_error(self, parameter_name, target_delta_log_like, sign=-1):
"""
A generic procedure to numerically compute the error for the parameters. You can override this if the
minimizer provides its own method to compute the error of one parameter. If it provides a method to compute
all errors at once, override the _get_errors method instead.
:param parameter_name:
:param target_delta_log_like:
:param sign:
:return:
"""
# Since the procedure might find a better minimum, we can repeat it
# up to a maximum of 10 times
repeats = 0
while repeats < 10:
# Let's start optimistic...
repeat = False
repeats += 1
# Restore best fit (which also updates the internal parameter dictionary)
self.restore_best_fit()
(
current_value,
current_delta,
current_min,
current_max,
) = self._internal_parameters[parameter_name]
best_fit_value = current_value
if sign == -1:
extreme_allowed = current_min
else:
extreme_allowed = current_max
# If the parameter has no boundary in the direction we are sampling, put a hard limit on
# 10 times the current value (to avoid looping forever)
if extreme_allowed is None:
extreme_allowed = best_fit_value + sign * 10 * abs(best_fit_value)
# We need to look for a value for the parameter where the difference between the minimum of the
# log-likelihood and the likelihood for that value differs by more than target_delta_log_likelihood.
# This is needed by the root-finding procedure, which needs to know an interval where the biased likelihood
# function (see below) changes sign
trials = best_fit_value + sign * np.linspace(0.1, 0.9, 9) * abs(
best_fit_value
)
trials = np.append(trials, extreme_allowed)
# Make sure we don't go below the allowed minimum or above the allowed maximum
if sign == -1:
np.clip(trials, extreme_allowed, np.inf, trials)
else:
np.clip(trials, -np.inf, extreme_allowed, trials)
# There might be more than one value which was below the minimum (or above the maximum), so let's
# take only unique elements
trials = np.unique(trials)
trials.sort()
if sign == -1:
trials = trials[::-1]
# At this point we have a certain number of unique trials which always
# contain the allowed minimum (or maximum)
minimum_bound = None
maximum_bound = None
# Instance the profile likelihood function
pl = ProfileLikelihood(self, [parameter_name])
for i, trial in enumerate(trials):
this_log_like = pl([trial])
delta = this_log_like - self._m_log_like_minimum
if delta < -0.1:
custom_warnings.warn(
"Found a better minimum (%.2f) for %s = %s during error "
"computation." % (this_log_like, parameter_name, trial),
BetterMinimumDuringProfiling,
)
xs = [x.value for x in list(self.parameters.values())]
self._store_fit_results(xs, this_log_like, None)
repeat = True
break
if delta > target_delta_log_like:
bound1 = trial
if i > 0:
bound2 = trials[i - 1]
else:
bound2 = best_fit_value
minimum_bound = min(bound1, bound2)
maximum_bound = max(bound1, bound2)
repeat = False
break
if repeat:
# We found a better minimum, restart from scratch
custom_warnings.warn("Restarting search...", RuntimeWarning)
continue
if minimum_bound is None:
# Cannot find error in this direction (it's probably outside the allowed boundaries)
custom_warnings.warn(
"Cannot find boundary for parameter %s" % parameter_name,
CannotComputeErrors,
)
error = np.nan
break
else:
# Define the "biased likelihood", since brenq only finds zeros of function
biased_likelihood = (
lambda x: pl(x) - self._m_log_like_minimum - target_delta_log_like
)
try:
precise_bound = scipy.optimize.brentq(
biased_likelihood,
minimum_bound,
maximum_bound,
xtol=1e-5,
maxiter=1000,
) # type: float
# brentq raises if the bracket is invalid or if it fails to converge
except (ValueError, RuntimeError):
custom_warnings.warn(
"Cannot find boundary for parameter %s" % parameter_name,
CannotComputeErrors,
)
error = np.nan
break
error = precise_bound - best_fit_value
break
return error
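# In short, the machinery above brackets and then solves
# pl(x) - m_log_like_minimum - target_delta_log_like = 0 with brentq, which
# requires the two bounds found in the trial loop to straddle a sign change.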
def get_errors(self):
"""
Compute asymmetric errors using the profile likelihood method (slow, but accurate).
:return: a dictionary with asymmetric errors for each parameter
"""
# Restore best fit so error computation starts from there
self.restore_best_fit()
# Get errors
errors_dict = self._get_errors()
# Transform in external reference if needed
best_fit_values = self._fit_results["value"]
for par_name, (negative_error, positive_error) in errors_dict.items():
parameter = self.parameters[par_name]
if parameter.has_transformation():
_, negative_error_external = parameter.internal_to_external_delta(
best_fit_values[parameter.path], negative_error
)
_, positive_error_external = parameter.internal_to_external_delta(
best_fit_values[parameter.path], positive_error
)
errors_dict[par_name] = (
negative_error_external,
positive_error_external,
)
else:
# No need to transform
pass
return errors_dict
def _get_errors(self):
"""
Override this method if the minimizer provides a function to get all errors at once. If instead it provides
a method to get one error at a time, override the _get_one_error method
:return: an ordered dictionary parameter_path -> (negative_error, positive_error)
"""
# TODO: options for other significance levels
target_delta_log_like = 0.5
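# For reference: 0.5 is the 1-sigma delta for a parabolic profile likelihood;
# other confidence levels CL would use scipy.stats.chi2.ppf(CL, 1) / 2.0
# (about 1.353 for CL = 0.9), which is what the TODO above is about.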
errors = collections.OrderedDict()
with progress_bar(2 * len(self.parameters), title="Computing errors") as p:
for parameter_name in self.parameters:
negative_error = self._get_one_error(
parameter_name, target_delta_log_like, -1
)
p.increase()
positive_error = self._get_one_error(
parameter_name, target_delta_log_like, +1
)
p.increase()
errors[parameter_name] = (negative_error, positive_error)
return errors
def contours(
self,
param_1,
param_1_minimum,
param_1_maximum,
param_1_n_steps,
param_2=None,
param_2_minimum=None,
param_2_maximum=None,
param_2_n_steps=None,
progress=True,
**options
):
"""
Generate confidence contours for the given parameters by stepping for the given number of steps between
the given boundaries. Call it specifying only param_1, param_1_minimum and param_1_maximum to
generate the profile of the likelihood for parameter 1. Specify all parameters to obtain instead a 2d
contour of param_1 vs param_2
:param param_1: name of the first parameter
:param param_1_minimum: lower bound for the range for the first parameter
:param param_1_maximum: upper bound for the range for the first parameter
:param param_1_n_steps: number of steps for the first parameter
:param param_2: name of the second parameter
:param param_2_minimum: lower bound for the range for the second parameter
:param param_2_maximum: upper bound for the range for the second parameter
:param param_2_n_steps: number of steps for the second parameter
:param progress: (True or False) whether to display progress or not
:param log: by default the steps are taken linearly. With this optional parameter you can provide a tuple of
booleans which specify whether the steps are to be taken logarithmically. For example,
'log=(True,False)' specify that the steps for the first parameter are to be taken logarithmically, while they
are linear for the second parameter. If you are generating the profile for only one parameter, you can specify
'log=(True,)' or 'log=(False,)' (optional)
:param parallel: whether or not to use parallel computation (default: False)
:return: a : an array corresponding to the steps for the first parameter
b : an array corresponding to the steps for the second parameter (or None if stepping only in one
direction)
contour : a matrix of size param_1_steps x param_2_steps containing the value of the function at the
corresponding points in the grid. If param_2_steps is None (only one parameter), then this reduces to
an array of size param_1_steps.
"""
# Figure out if we are making a 1d or a 2d contour
if param_2 is None:
n_dimensions = 1
fixed_parameters = [param_1]
else:
n_dimensions = 2
fixed_parameters = [param_1, param_2]
# Check the options
p1log = False
p2log = False
if "log" in list(options.keys()):
assert len(options["log"]) == n_dimensions, (
"When specifying the 'log' option you have to provide a "
+ "boolean for each dimension you are stepping on."
)
p1log = bool(options["log"][0])
if param_2 is not None:
p2log = bool(options["log"][1])
# Generate the steps
if p1log:
param_1_steps = np.logspace(
math.log10(param_1_minimum),
math.log10(param_1_maximum),
param_1_n_steps,
)
else:
param_1_steps = np.linspace(
param_1_minimum, param_1_maximum, param_1_n_steps
)
if n_dimensions == 2:
if p2log:
param_2_steps = np.logspace(
math.log10(param_2_minimum),
math.log10(param_2_maximum),
param_2_n_steps,
)
else:
param_2_steps = np.linspace(
param_2_minimum, param_2_maximum, param_2_n_steps
)
else:
# Only one parameter to step through
# Put param_2_steps as a single nan so the reshape in the return statement
# still has a well-defined second dimension in the 1d case
param_2_steps = np.array([np.nan])
# Restore best fit
if self.fit_results is not None:
self.restore_best_fit()
else:
custom_warnings.warn(
"No best fit to restore before contours computation. "
"Perform the fit before running contours to remove this warnings."
)
pr = ProfileLikelihood(self, fixed_parameters)
if n_dimensions == 1:
results = pr.step(param_1_steps)
else:
results = pr.step(param_1_steps, param_2_steps)
# Return results
return (
param_1_steps,
param_2_steps,
np.array(results).reshape((param_1_steps.shape[0], param_2_steps.shape[0])),
)
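# Hedged usage sketch (the parameter path below is hypothetical):
#
#   a, b, grid = minimizer.contours("source.spectrum.index",
#                                   -3.0, -1.0, 30, log=(False,))
#
# gives a 1d likelihood profile; supplying the param_2* arguments as well
# returns a 2d grid of -log(like) values suitable for contour plotting.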
class LocalMinimizer(Minimizer):
pass
class GlobalMinimizer(Minimizer):
pass
# Check which minimizers are available
try:
from threeML.minimizer.minuit_minimizer import MinuitMinimizer
except ImportError:
custom_warnings.warn("Minuit minimizer not available", ImportWarning)
else:
_minimizers["MINUIT"] = MinuitMinimizer
try:
from threeML.minimizer.ROOT_minimizer import ROOTMinimizer
except ImportError:
custom_warnings.warn("ROOT minimizer not available", ImportWarning)
else:
_minimizers["ROOT"] = ROOTMinimizer
try:
from threeML.minimizer.multinest_minimizer import MultinestMinimizer
except ImportError:
custom_warnings.warn("Multinest minimizer not available", ImportWarning)
else:
_minimizers["MULTINEST"] = MultinestMinimizer
try:
from threeML.minimizer.pagmo_minimizer import PAGMOMinimizer
except ImportError:
custom_warnings.warn("PyGMO is not available", ImportWarning)
else:
_minimizers["PAGMO"] = PAGMOMinimizer
try:
from threeML.minimizer.scipy_minimizer import ScipyMinimizer
except ImportError:
custom_warnings.warn("Scipy minimizer is not available", ImportWarning)
else:
_minimizers["SCIPY"] = ScipyMinimizer
# Check that we have at least one minimizer available
if len(_minimizers) == 0:
raise SystemError(
"You do not have any minimizer available! You need to install at least iminuit."
)
# Add the GRID minimizer here since it needs at least one other minimizer
from threeML.minimizer.grid_minimizer import GridMinimizer
_minimizers["GRID"] = GridMinimizer
| bsd-3-clause |
tcompa/anneal | examples/generalized_linear_model/anneal_glm.py | 1 | 2393 | '''
program: anneal_glm.py
author: tc
created: 2016-04-19 -- 10 CEST
notes: performs one simulated-annealing run, for a GLM problem.
'''
import numpy
import matplotlib.pyplot as plt
from anneal import simulated_annealing
from lib_generalized_linear_model import GLM
# define D-dimensional general linear model: Y = X*M + Q
D = 25
Q = numpy.random.uniform(-1.0, 1.0, D)
M = numpy.random.uniform(-1.0, 1.0, size=(D, D))
# generate noisy data
Npoints = 100
eps = 0.1
X = numpy.random.uniform(-1.0, 1.0, (Npoints, D))
Y = numpy.matmul(X, M) + Q
Y += numpy.random.uniform(-eps, eps, size=Y.shape)
# initialize GLM instance
G = GLM(X, Y)
# perform annealing
print('SA start')
G, E, et = simulated_annealing(G, 'GLM',
beta_min=1e-1, beta_max=1e4,
cooling_rate=5e-2, n_steps_per_T=200,
quench_to_T0=True, n_steps_T0=10000)
print('SA end (elapsed: %.1f s)' % et)
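# Optional sanity check (a sketch, not part of the original script): this
# noisy GLM also has a closed-form least-squares solution that the annealed
# G.M and G.Q should approach; numpy.linalg.lstsq does the fit directly.
# Xa = numpy.hstack([X, numpy.ones((Npoints, 1))])
# coef = numpy.linalg.lstsq(Xa, Y)[0]
# M_ls, Q_ls = coef[:D], coef[D]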
# plot energy
fig, ax = plt.subplots(1, 1)
ax.plot(E)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('time')
ax.set_ylabel('energy')
plt.savefig('fig_energy.png', bbox_inches='tight')
plt.close()
# plot M
fig, axs = plt.subplots(2, 2, figsize=(10, 10))
ax1, ax2, ax3, ax4 = axs.flatten()
vmin = min(M.min(), G.M0.min(), G.M.min()) / 1.05
vmax = max(M.max(), G.M0.max(), G.M.max()) * 1.05
props = {'cmap': 'viridis', 'vmin': vmin, 'vmax': vmax, 'origin': 'lower'}
im1 = ax1.matshow(M, **props)
im2 = ax2.matshow(G.M, **props)
im3 = ax3.matshow(G.M0, **props)
im4 = ax4.matshow(M - G.M, **props)
ax1.set_title('\"true\"')
ax2.set_title('final')
ax3.set_title('starting')
ax4.set_title('diff')
for ax in axs.flatten():
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_label_position('bottom')
ax_cb = fig.add_axes([0.25, 0.95, 0.5, 0.025])
CB = fig.colorbar(im1, cax=ax_cb, orientation='horizontal')
CB.ax.tick_params(labelsize=10, direction='in',
labeltop=True, labelbottom=False)
CB.ax.xaxis.set_ticks_position('top')
CB.ax.xaxis.set_label_position('top')
CB.ax.xaxis.labelpad = 10
plt.savefig('fig_matrix.png', bbox_inches='tight', dpi=128)
plt.close()
# plot Q
plt.plot(G.Q0, 'o-', label='starting')
plt.plot(Q, 'x-', ms=8, label='\"true\"')
plt.plot(G.Q, 'o-', ms=5, label='final')
plt.legend(loc='best')
plt.xlabel('component')
plt.title('offset Q')
plt.savefig('fig_offset.png', bbox_inches='tight')
| mit |
CosmoJG/neural-heatmap | dependencies/NeuronGeometry.py | 1 | 98029 | #!/usr/bin/python
_usageStr=\
"""usage: neuron_readExportedGeometry.py geoFile
get dictionary object describing neuron model geometry info by reading file
"""
import os
from scipy import special
from numpy import mean, std
from collections import deque
import matplotlib.pyplot as pyplot
from math import log, sqrt, atan, isnan, pi, acos
from bisect import bisect_left
import numpy as np
"""
Geometry class public methods: (self is always first argument)
setFileName(_fileName)
numCompartments()
readGeometry() pure virtual
displaySummary()
findBranches()
checkConnectivity()
shollAnalysis()
"""
terminalColors = {
'endColor' : '\033[0m',
'black' : '\033[0;30m',
'red' : '\033[0;31m',
'green' : '\033[0;32m',
'yellow' : '\033[0;33m',
'blue' : '\033[0;34m',
'purple' : '\033[0;35m',
'cyan' : '\033[0;36m',
'lightGray' : '\033[0;37m',
'darkGray' : '\033[1;30m',
'boldRed' : '\033[1;31m',
'boldGreen' : '\033[1;32m',
'boldYellow' : '\033[1;33m',
'boldBlue' : '\033[1;34m',
'boldPurple' : '\033[1;35m',
'boldCyan' : '\033[1;36m',
'white' : '\033[1;37m'
}
def warn(warnStr, extraInfo='', color='boldRed'):
if extraInfo:
print(terminalColors[color] + warnStr + terminalColors['endColor']
+ ': ' + extraInfo)
else:
print(terminalColors[color] + warnStr + terminalColors['endColor'])
def cumsum(values, start=0.0, yieldStart=True):
"""
Return generator to running cumulative sum of values
"""
if yieldStart:
yield start
for v in values:
start += v
yield start
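# Example: list(cumsum([1.0, 2.0, 3.0])) yields [0.0, 1.0, 3.0, 6.0];
# passing yieldStart=False omits the leading 0.0.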
"""
class PathDistanceFinder
Finds network distances from one segment/branch at specified position
Can report distance to another segment/branch at specified position via
.distanceTo()
Can report optimal path to another segment/branch at specified position via
.pathTo()
Can report tortuosity of optimal path via
.tortuosityTo()
Can compute electrotonic lengths from a list of voltages via
.getElectrotonicLengths()
"""
class PathDistanceFinder(object):
def __init__(self, geometry, segment, pos=0.5, warnLoops=False):
self.geometry = geometry
if type(segment) == int:
segment = geometry.segments[segment]
self.network = geometry.segments
elif segment in geometry.segments:
self.network = geometry.segments
elif segment in geometry.branches:
self.network = geometry.branches
else:
raise TypeError('Start segment must be an index to geometry.segments, or'
' an object from geometry.segments or geometry.branches')
self.warnLoops = warnLoops
self.startSegment = segment
self.startPos = pos
self.startCoord = segment.coordAt(pos)
self.distances, self.branchOrders = self._computeDistances()
def distanceTo(self, segment, pos=0.5):
# return distance to specified segment at the specified location
if type(segment) == int:
segment = self.network[segment]
if segment not in self.distances:
raise KeyError('%s is not reachable from the network' % segment.name)
return min(baseD + segment.length * abs(pos - startPos) for
baseD, startPos, pathDesc, path in self.distances[segment])
def pathTo(self, segment, pos=0.5):
# return optimal path to specified segment at specified location
return min(((baseD + segment.length * abs(pos - startPos), path) for
baseD, startPos, pathDesc, path in self.distances[segment]),
key=lambda x:x[0])[1]
def pathDescriptionTo(self, segment, pos=0.5):
# return description of the optimal path to specified segment at specified location
return min(((baseD + segment.length * abs(pos - startPos), pathDesc) for
baseD, startPos, pathDesc, path in self.distances[segment]),
key=lambda x:x[0])[1]
def tortuosityTo(self, segment, pos=0.5):
stopCoord = segment.coordAt(pos)
euclideanD = sqrt(sum((r0-r1)**2 for r0,r1 in zip(self.startCoord,
stopCoord)))
pathD = self.distanceTo(segment, pos)
tortuosity = pathD / euclideanD
if tortuosity < 1.0:
warn('Tortuosity < 1',
self.pathDescriptionTo(segment, pos))
raise RuntimeError('Path to %s at %g has tortuosity < 1'
% (segment.name, pos))
return tortuosity
def branchOrder(self, segment):
return self.branchOrders[segment]
def getElectrotonicLengths(self, steadyVoltages):
# given a list of steady-state voltages, return electrotonic lengths of all
# segments
return [self._getElectrotonicLength(ind, seg, steadyVoltages)
for ind, seg in enumerate(self.network)]
def _getElectrotonicLength(self, index, segment, steadyVoltages):
# given list of steady-state voltages, compute the electrotonic length of a
# specific segment
dSeg = self.distanceTo(segment)
vSeg = steadyVoltages[index]
segOrder = self.branchOrders[segment]
# use only one other neighbor, pick the closest one that is of a different
# branch order
neighbor = min((s for s in segment.neighbors
if self.branchOrders[s] != segOrder),
key=lambda n: abs(self.distanceTo(n) - dSeg))
dNeighbor = self.distanceTo(neighbor)
vNeighbor = steadyVoltages[self.network.index(neighbor)]
eLength = (dNeighbor - dSeg) / log(vSeg / vNeighbor)
if eLength < 0:
nIndex = self.network.index(neighbor)
nOrder = self.branchOrders[neighbor]
print('eLength=%g: ind=%d, nInd=%d, d=%g, nd=%g, v=%g, nV=%g, '
% (eLength, index, nIndex, dSeg, dNeighbor, vSeg, vNeighbor)
+ 'order=%d, nOrder=%d' % (segOrder, nOrder))
return eLength
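# i.e. the discrete estimate L = (d_neighbor - d_seg) / ln(v_seg / v_neighbor),
# which follows from assuming an exponential voltage decay
# v(d) ~ exp(-d / L) between the two measurement points.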
def _computeDistances(self):
# use Dijkstra's algorithm to find path distance from start to rest of
# neuron.
# Keep track of effect of startPos (starting position in startSegment)
# Also keep track of effect of pos of each final segment
segment, startPos = self.startSegment, self.startPos
# distances is a dict object, with segments as keys
# the values are a list of paths, each path described by a tuple
# (pathDistance, connecting location in the segment, path description,
# list of segments in path )
distances = { segment : [(0.0, startPos,
segment.name + '(%.1f)' % startPos, [segment])] }
branchOrders = { segment : 0 }
openSegments = { segment, }
while openSegments:
segment = openSegments.pop()
for currentD, startPos, segPathDesc, segPath in distances[segment]:
branchOrderInc = 1
if branchOrders[segment] > 0 and len(segment.neighbors) <= 2:
branchOrderInc = 0
#branchOrderInc = int(len(segment.neighbors) > 2)
for neighbor, (connectLoc, nConnectLoc, node) \
in zip(segment.neighbors, segment.neighborLocations):
pathD = currentD + segment.length * abs(startPos - connectLoc)
# check if neighbor in distances?
if neighbor not in distances:
# found path to new segment
nPath = segPath + [neighbor]
nPathDesc = segPathDesc + '->(%.1f)->' % connectLoc + \
neighbor.name + '(%.1f)' % nConnectLoc
distances[neighbor] = [(pathD, nConnectLoc, nPathDesc, nPath)]
openSegments.add(neighbor)
branchOrders[neighbor] = branchOrders[segment] + branchOrderInc
else:
# Either there is a loop involving this segment, or the path is
# backtracking
# Check if the current path is an efficient route to the loop
efficient = True
insertInd = None
# build a filtered copy instead of popping from the list while
# enumerating it, which would skip entries and corrupt the path list
loopDistances = []
for loopD, loopPos, loopPathDesc, loopPath in distances[neighbor]:
traverse = neighbor.length * abs(loopPos - nConnectLoc)
if pathD >= loopD + traverse:
# the new path to neighbor is too slow to ever be useful
efficient = False
break
if insertInd is None and pathD < loopD:
insertInd = len(loopDistances)
if pathD + traverse < loopD:
# the new path to neighbor renders this old one obsolete; drop it
continue
loopDistances.append((loopD, loopPos, loopPathDesc, loopPath))
if efficient:
nPath = segPath + [neighbor]
nPathDesc = segPathDesc + '->(%.1f)->' % connectLoc + \
neighbor.name + '(%.1f)' % nConnectLoc
pathInfo = (pathD, nConnectLoc, nPathDesc, nPath)
if insertInd is None:
loopDistances.append(pathInfo)
else:
loopDistances.insert(insertInd, pathInfo)
distances[neighbor] = loopDistances
openSegments.add(neighbor)
branchOrders[neighbor] = branchOrders[segment] + branchOrderInc
if self.warnLoops and len(loopDistances) > 1:
warn('%d efficient paths to %s.' % (len(loopDistances), neighbor.name))
for loopD, loopPos, loopPathDesc, loopPath in loopDistances:
print(loopPathDesc)
return distances, branchOrders
class Geometry:
def __init__(self, _fileName = None):
# initialize file info and containers for the geometry data
self.path = None
self.fileName = None
self.name = None
# store the geometry info here
self.nodes = []
self.segments = []
self.branches = []
self.compartments = []
self.branchOrders = None
self.tags = {'*' : 0}
# helper sets for efficient deleting
self._removeNodes = set()
self._removeSegments = set()
# keep track of which objects have had connectivity checked
self._connectivityChecked = set()
self._soma = None
self._somaBranch = None
self._axons = None
self._axonsBranch = None
self.surfaceArea = 0.0 # mm^2
self.volume = 0.0 # mm^3
self.minRange = [float('nan'), float('nan'), float('nan')]
self.maxRange = [float('nan'), float('nan'), float('nan')]
if _fileName is not None:
self.setFileName(_fileName)
self.readGeometry()
def setFileName(self, fileName):
self.path = os.path.dirname(os.path.abspath(fileName))
self.fileName = fileName
self.name = os.path.basename(fileName).split('.')[0]
def numCompartments(self):
return len(self.compartments)
def readGeometry(self):
raise RuntimeError( \
'Geometry must be a subclass that knows how to read files')
def displaySummary(self):
"""
Display summary statistics of neuron geometry
"""
print("total number of nodes: %d" % len(self.nodes))
print("total number of compartments: %d" % len(self.compartments))
print("total number of segments: %d" % len(self.segments))
subGraphs = self.checkConnectivity(removeDisconnected=True, debugInfo=True)
print("number of connected nodes: %d" % len(self.nodes))
print("number of connected compartments: %d" % len(self.compartments))
print("number of connected segments: %d" % len(self.segments))
self.findBranches()
print("number of branches: %d" % len(self.branches))
soma = self.soma
somaArea = sum(c.surfaceArea for c in soma.compartments \
if 'Soma' in c.tags)
print('Soma Area = %g mm^2' % somaArea)
print('Found %d axon%s' % (len(self._axons), 's'*(len(self._axons)!=1)))
print("volume: %g mm^3" % self.volume)
print("surface area: %s mm^2" % self.surfaceArea)
self.calcBranchOrder(doPlot=False)
self.shollAnalysis(straightenNeurites=True)
self.mergeBranchesByDistanceToEdge()
pyplot.show() # display all generated plots
#############################################################################
def getProperties(self, passiveFile="", display=False, # CHANGED FALSE
makePlots=False):
def _dispListStats(L, confidence = 0.05, display=True, printName=""):
# return median, lowBound, highBound
sortedL = sorted(L)
numL = len(L)
if numL % 2:
# odd number of elements
medianInd = int((numL - 1) / 2)
median = sortedL[medianInd]
else:
medianInd = numL // 2
median = (sortedL[medianInd] + sortedL[medianInd - 1]) / 2.0
lowInd = int(round( 0.5 * confidence * numL ))
highInd = int(round( (1.0 - 0.5 * confidence) * numL ))
low = sortedL[lowInd] ; high = sortedL[highInd]
if display and printName:
print('%s = %.2f +%.2f -%.2f'
% (printName, median, high - median, median - low))
return median, low, high
def _plotTraces(timeTrace, vTraces):
fig = pyplot.figure()
axes = fig.add_subplot(1, 1, 1)
# list() is needed in Python 3, where dict.values() is a view
y = np.array(list(vTraces.values())).transpose()
axes.plot(timeTrace, y)
pyplot.ylabel('Membrane Potential (mV)')
pyplot.xlabel('Time (ms)')
pyplot.title('Model Response to Step Current')
pyplot.tight_layout()
from scipy.optimize import brentq, fmin
def _rallLaw(p, *ratios):
try:
return sum(r**p for r in ratios) - 1.0
except OverflowError:
return float('inf')
def _rallLawTrouble(p, *ratios):
try:
return (sum(r**p for r in ratios) - 1.0)**2
except OverflowError:
return float('inf')
def _getRallPow(parentR, daughterRs):
ratios = tuple(d / parentR for d in daughterRs)
checkPows = [-log(len(ratios)) / log(r) for r in ratios if r != 1.0]
try:
return brentq(_rallLaw, min(checkPows), max(checkPows), args=ratios)
except ValueError:
#print('Rall-incompatible branch ratios: %s'
# % ' '.join('%.2f' % r for r in ratios))
return fmin(_rallLawTrouble, 0.0, args=ratios, disp=False)[0]
def _overallRall(p, ratiosList):
return sum(_rallLawTrouble(p, *ratios) for ratios in ratiosList)
def _getOverallRallPow(ratiosList):
return fmin(_overallRall, 0.0, args=(ratiosList,), disp=False)[0]
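# Background for the Rall computations later in this method: Rall's law with
# power p requires sum_i (d_i / d_parent)**p == 1 at a branch point, with
# p = 3/2 for ideal impedance matching. Example: a parent of radius 2.0
# splitting into two equal daughters satisfies p = 1.5 when each daughter
# radius is 2.0 / 2**(2.0 / 3.0), roughly 1.26.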
# check connectivity
self.checkConnectivity(removeDisconnected=True, removeLoops=True)
self.findBranches()
if display:
print("number of connected nodes: %d" % len(self.nodes))
print("number of connected compartments: %d" % len(self.compartments))
print("number of connected segments: %d" % len(self.segments))
print("number of branches: %d" % len(self.branches))
print('Surface area = %g mm^2' % self.surfaceArea)
print('Volume = %g mm^3' % self.volume)
print('Surface to volume ratio = %g mm^-1'
% (self.surfaceArea/self.volume))
# make a path distance finder centered at the soma
pDF = PathDistanceFinder(self, self.soma)
# find all the neuron tips
tips, tipPositions = self.getTips()
# measure path lengths from Soma to tips
pathLengths = [pDF.distanceTo(tip, pos)
for tip, pos in zip(tips, tipPositions)]
_dispListStats(pathLengths, display=display,
printName='Path length from Soma to tips')
# measure tortuosities from Soma to tips
tortuosities = [pDF.tortuosityTo(tip, pos)
for tip, pos in zip(tips, tipPositions)]
_dispListStats(tortuosities, display=display,
printName='Tortuosity of path from Soma to tips')
# measure branch tortuosities
bTortuosities = [branch.tortuosity for branch in self.branches
if branch.tortuosity < float('inf')]
_dispListStats(bTortuosities, display=display,
printName='Tortuosity of neuron branches')
if self.soma.branchOrder is None:
self.calcBranchOrder(doPlot=False)
self.mergeBranchesByDistanceToEdge(makePlots=makePlots)
branchAngles = [getBranchAngle(branch, neighbor, segLoc, nLoc, node)
for branch in self.branches
for neighbor, (segLoc, nLoc, node)
in zip(branch.neighbors, branch.neighborLocations)
if neighbor.branchOrder > branch.branchOrder]
_dispListStats(branchAngles, display=display,
printName='For neuron branches, branch angle')
radList = [] # added 11.12.2014
rallRatios = []
daughterRatios = []
rallPowers = []
ratiosList = []
DPratios = [] # added 11.12.2014
daughterdaughter = [] # daughter-daughter ratio
for segment in self.branches:
#if segment.branchOrder < 4:
# continue
daughters = [n for n in segment.neighbors
if n.branchOrder > segment.branchOrder]
if daughters:
rallRatio = \
sum(n.avgRadius**1.5 for n in daughters) / segment.avgRadius**1.5
rallRatios.append(rallRatio)
daughterRatios.extend(n.avgRadius / segment.avgRadius
for n in daughters)
# added 11.12.2014
daughtRads, daughtCount = [], 0
DDR = []
for n in daughters:
daughtRads.append(n.avgRadius)
daughtCount = daughtCount + 1
DDR.append(n.avgRadius)
DPratios.append( (sum(daughtRads)/daughtCount)/segment.avgRadius )
daughterdaughter.append(np.mean([DDR[d]/DDR[d+1] for d in
range(len(DDR)-1)]))
rallPowers.append(_getRallPow(segment.avgRadius,
[n.avgRadius for n in daughters]))
ratiosList.append([n.avgRadius / segment.avgRadius for n in daughters])
radList.append(segment.avgRadius)
_dispListStats(rallRatios, display=display,
printName='For neuron branches, Rall ratio')
_dispListStats(daughterRatios, display=display,
printName='For neuron branches, daughter branch ratio')
_dispListStats(rallPowers, display=display,
printName='For neuron branches, Rall power')
properties = {
'Num Nodes' : len(self.nodes),
'Num Compartments' : len(self.compartments),
'Num Segments' : len(self.segments),
'Num Branches' : len(self.branches),
'Surface Area' : self.surfaceArea,
'Volume' : self.volume,
'Area-To-Volume Ratio' : self.surfaceArea / self.volume,
'Path Length' : pathLengths,
'Tortuosity' : tortuosities,
'Branch Tortuosity' : bTortuosities,
'Branch Angles' : branchAngles,
'Rall Ratio' : rallRatios,
'Daughter/Parent Radius' : daughterRatios,
'Overall Rall Power' : _getOverallRallPow(ratiosList),
'Radius List': radList,
'DP Ratio': DPratios,
'DD Ratio': daughterdaughter
}
units = {
'Num Nodes' : '',
'Num Compartments' : '',
'Num Segments' : '',
'Num Branches' : '',
'Surface Area' : 'mm^2',
'Volume' : 'mm^3',
'Area-To-Volume Ratio' : 'mm^-1',
'Path Length' : 'um',
'Tortuosity' : '',
'Branch Tortuosity' : '',
'Branch Angles' : 'degrees',
'Rall Ratio' : '',
'Daughter/Parent Radius' : '',
'Overall Rall Power' : '',
'Radius List': 'um',
'DP Ratio': '',
'DD Ratio': ''
}
if passiveFile:
from neuron_simulateGeometry import makeModel, simulateModel
import peelLength
import json
# get the properties
with open(passiveFile, 'r') as fIn:
passiveProperties = json.load(fIn)
# make a demo model
model = makeModel(self, passiveProperties)
# simulation model on specified geometry
timeTrace, vTraces, textOutput = simulateModel(self, model)
if makePlots:
_plotTraces(timeTrace, vTraces)
somaV = max(vTraces[self.soma.name])
rIn = somaV / model['stimulus']['amplitude']
properties['Input resistance'] = rIn
units['Input resistance'] = 'MOhm'
if display:
print('Input resistance = %g MOhm' % rIn)
tipsV = [max(vTraces[segment.name]) for segment in self.segments
if 'Soma' not in segment.tags and segment.isTerminal]
tipsTransfer = [tipV / somaV for tipV in tipsV]
_dispListStats(tipsTransfer, display=display,
printName='Coupling coefficient from soma to tips')
properties['Coupling Coefficient'] = tipsTransfer
units['Coupling Coefficient'] = ''
model, vErr, vResid = \
peelLength.modelResponse(timeTrace, vTraces[self.soma.name],
verbose=False, findStepWindow=True,
plotFit=False, debugPlots=False,
displayModel=display)
tauM = model[0][0]
if display:
print('membrane tau = %6.2f ms' % tauM)
properties['Membrane Time Constant'] = tauM
units['Membrane Time Constant'] = 'ms'
if makePlots:
self.shollAnalysis()
return properties, units
def findBranches(self):
"""
Break up geometry into segments defined by branch points, starting at the
soma
"""
if self.branches:
return
self.checkConnectivity(removeDisconnected=True)
if not self._somaBranch:
self._findSoma()
somaBranch, somaNeighbors0, somaNeighbors1 = self._somaBranch
# This can cause weird errors if not fixed:
if somaBranch.neighbors:
warn('Some routine cleared self.branches without removing '
+ 'somaBranch.neighbors')
somaBranch.neighbors = []
self.branches = [somaBranch]
openBranches = [(somaBranch, 0, somaNeighbors0), \
(somaBranch, 1, somaNeighbors1)]
openCompartments = set(self.compartments).difference(
somaBranch.compartments)
while openBranches:
# check an open branch to see if it has any neighbors branching off
checkBranch, side, neighbors = openBranches.pop()
# find neighbors on specified side of checkBranch
commonNeighbors = { (checkBranch, side), }
checkNode = checkBranch.nodes[-side]
for segment, pos, compartment in neighbors:
if compartment not in openCompartments:
continue
# for each neighboring segment, find the branch it's in, based on
# compartment (ignore segment and pos)
branch, neighbors0, neighbors1 = self._getBranch(compartment)
# add that branch to geometry
self.branches.append(branch)
# remove the compartments in branch from openCompartments
openCompartments.difference_update(branch.compartments)
# add branch to dict of common neighbors, at appropriate side
# and add to openBranches with appropriate neighbors
if branch.nodes[-1] == checkNode:
if branch.nodes[0] == checkNode:
# branch is a loop with both ends connected to checkBranch
commonNeighbors.add( (branch, 0) )
commonNeighbors.add( (branch, 1) )
else:
# side 1 of branch connects to checkBranch at checkNode
if branch.nodes[-1] != checkNode:
checkTags = ' '.join(checkBranch.tags)
checkNodeInd = self.nodes.index(checkNode)
branchTags = ' '.join(branch.tags)
branchNodes =str(tuple(self.nodes.index(n) for n in branch.nodes))
raise AssertionError(('Node mismatch. %s (with tags %s) should'
+ ' connect to %s (with tags %s) at node %d, but %s has '
+ 'nodes %s') % (checkBranch.name, checkTags, branch.name,
branchTags, checkNodeInd, branch.name,
branchNodes))
commonNeighbors.add( (branch, 1) )
# side 0 is still open
openBranches.append((branch, 0, neighbors0))
else:
# side 0 of branch connects to checkBranch at checkNode
if branch.nodes[0] != checkNode:
checkTags = ' '.join(checkBranch.tags)
checkNodeInd = self.nodes.index(checkNode)
branchTags = ' '.join(branch.tags)
branchNodes = str(tuple(self.nodes.index(n) for n in branch.nodes))
raise AssertionError(('Node mismatch. %s (with tags %s) should'
+ ' connect to %s (with tags %s) at node %d, but %s has '
+ 'nodes %s') % (checkBranch.name, checkTags, branch.name,
branchTags, checkNodeInd, branch.name,
branchNodes))
commonNeighbors.add( (branch, 0) )
# side 1 is still open
openBranches.append((branch, 1, neighbors1))
# update the neighbors of checkBranch and all the new branches
while commonNeighbors:
n1, n1Side = commonNeighbors.pop()
for n2, n2Side in commonNeighbors:
_makeNeighbors(n1, n2, n1Side, n2Side, checkNode)
def _plotBranchStat(self, branchStat, yLabel, title, \
fontSize=22, barWidth=0.25):
### plot collected statistic along with number of branches
### branchStat should be a dictionary with:
### -each key is a branch order (an integer)
### -each item is a list of y-values (the list for all branches with
### that branch order)
order = list(branchStat.keys())
order.sort()
y = [branchStat[o] for o in order]
x = list(range(len(order)))
orderStr = [str(o) for o in order]
numBranches = [len(y_n) for y_n in y]
# make new figure
fig = pyplot.figure()
# plot number of branches as bar plot
ax1 = pyplot.gca()
pyplot.bar(x, numBranches, width=barWidth, color='g')
pyplot.ylabel('# branches', fontsize=fontSize)
pyplot.xlabel('Branch Order', fontsize=fontSize)
pyplot.xticks(x, orderStr)
# plot y statistics as a box and whisker plot
positions = [x_n - barWidth/2.0 for x_n in x]
ax2 = pyplot.twinx()
pyplot.boxplot(y, positions=positions, widths=barWidth)
pyplot.title(title, fontsize=fontSize)
pyplot.ylabel(yLabel, fontsize=fontSize)
pyplot.xlabel('Branch Order', fontsize=fontSize)
pyplot.xticks(x, orderStr)
# set the numBranches y-axis and labels on right, main on left
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position('right')
ax2.yaxis.tick_left()
ax2.yaxis.set_label_position('left')
pyplot.tight_layout()
return fig
def _plotBranchOrderStatistics(self):
### Visualize various statistics dependent upon branch order
# collect data
branchRadius = {}
branchLength = {}
for branch in self.mergedBranches:
order = branch.centripetalOrder
if order not in branchRadius:
branchRadius[order] = [branch.maxRadius]
branchLength[order] = [branch.length]
else:
branchRadius[order].append(branch.maxRadius)
branchLength[order].append(branch.length)
self._plotBranchStat(branchRadius, \
'Radius (um)', 'Branch Order vs. Radius')
self._plotBranchStat(branchLength, \
'Length (um)', 'Branch Order vs Length')
def calcBranchOrder(self, doPlot=True):
self.calcForewardBranchOrder(doPlot=False, printAxonInfo=False)
self.calcCentripetalOrder(doPlot=doPlot, network=self.branches)
self.calcCentripetalOrder(doPlot=doPlot, network=self.segments)
def calcForewardBranchOrder(self, doPlot=True, printAxonInfo=False):
somaPos = self.soma.centroidPosition(mandateTag='Soma')
pDF = PathDistanceFinder(self, self.soma, somaPos)
for segment in self.segments:
segment.branchOrder = pDF.branchOrder(segment)
self.findBranches()
somaPos = self.somaBranch.centroidPosition(mandateTag='Soma')
pDF = PathDistanceFinder(self, self.somaBranch, somaPos)
for branch in self.branches:
branch.branchOrder = pDF.branchOrder(branch)
def calcCentripetalOrder(self, doPlot=True, network=None):
"""
Define neurite "ends" to be segments that have locally maximal branchOrder
(ends are terminal segments unless there are loops).
Label each end with centripetal order 0.
Every other segment is labeled with centripetal order equal to the length
of the longest path from an end to that segment, provided that allowable
paths ALWAYS move towards the soma.
geometry.calcCentripetalOrder() sets segment.centripetalOrder set to this
value for all segments in network
"""
if network is None or not network:
self.findBranches()
network = self.branches
def _isEnd(segment):
return all(segment.branchOrder >= n.branchOrder
for n in segment.neighbors)
ends = [segment for segment in network if _isEnd(segment)]
for segment in ends:
if 'Soma' in segment.tags:
# the soma should never be a locally-maximal end; report details
warn('Soma unexpectedly classified as a neurite end',
'branchOrder=%d, neighbor orders=%s'
% (segment.branchOrder,
str([n.branchOrder for n in segment.neighbors])))
for segment in network:
segment.centripetalOrder = -1
for segment in ends:
# mark each end as having centripetal order 0
segment.centripetalOrder = 0
# now find paths from each end towards the soma. Each segment's
# centripetal order is the length of the longest path from an end to the
# segment, PROVIDED that the path ALWAYS moves towards the soma
openSegs = [segment]
while openSegs:
currentSeg = openSegs.pop()
neighborCentripOrder = currentSeg.centripetalOrder + 1
for neighbor in currentSeg.neighbors:
# look for new paths to soma, insisting that:
# 1. The path ALWAYS moves closer to soma
# 2. There isn't already a longer path through this area
if neighbor.branchOrder < currentSeg.branchOrder and \
neighbor.centripetalOrder < neighborCentripOrder:
# this is a new (good) path, so mark the centripetal order and
# continue it
neighbor.centripetalOrder = neighborCentripOrder
openSegs.append(neighbor)
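# Example on a small tree: in a symmetric two-level binary arbor, the four
# tip branches get centripetal order 0, their two parent branches order 1,
# and the root branch order 2, since the order counts the longest soma-ward
# path from any end.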
def checkConnectivity(self, removeDisconnected=False, checkObjects=None,
debugInfo=True, removeLoops=False):
"""
Compute the connectivity of the network:
-The number/members of connected subgraphs
-The presence of any loops
if removeDisconnected is True, remove all but largest subgraph from network
Return the list of subgraphs
"""
if checkObjects is None:
checkObjects = self.segments
checkHash = hash(str(checkObjects)+str(removeDisconnected))
if checkHash in self._connectivityChecked:
# don't need to check again
return
# check to be sure that neighborhood at a location/node is transitive
for segment in checkObjects:
for neighbor, (pos, nPos, node) in zip(segment.neighbors,
segment.neighborLocations):
assert segment in node.segments, \
"%s should be in node %d's list of segments, but is not" \
% (segment.name, self.nodes.index(node))
assert neighbor in node.segments, \
"%s should be in node %d's list of segments, but is not" \
% (neighbor.name, self.nodes.index(node))
for n2, (pos2, nPos2, node2) in zip(neighbor.neighbors,
neighbor.neighborLocations):
if node2 != node:
continue
assert n2 == segment or n2 in segment.neighbors, \
"%s and %s are neighbors at node %d, and so are %s and %s,"\
" but %s and %s are not" % (segment.name, neighbor.name,
self.nodes.index(node), neighbor.name, n2.name,
segment.name, n2.name)
subGraphs = []
checkObjs = {obj for obj in checkObjects}
while checkObjs:
# start checking new subgraph
start = checkObjs.pop()
connected = { (start, None) }
subGraph = { start }
pathSegNames = { start : [start.name] }
paths = {start : []}
while connected:
# find all the elements connected to this subgraph
segment, startNode = connected.pop()
for neighbor, (pos, nPos, node) in zip(segment.neighbors,
segment.neighborLocations):
if node == startNode and neighbor != segment:
# this is just backtracking
continue
if neighbor in checkObjs:
connected.add((neighbor, node))
subGraph.add(neighbor)
checkObjs.remove(neighbor)
pathSegNames[neighbor] = pathSegNames[segment] + [neighbor.name]
paths[neighbor] = paths[segment] + [(segment, node, neighbor)]
else:
# there is a loop!
names1 = pathSegNames[segment]
try:
names2 = pathSegNames[neighbor]
except KeyError as err:
print(segment.name, neighbor.name, neighbor in self.segments,
neighbor in checkObjs, neighbor in subGraph,
neighbor in pathSegNames)
print([neighbor in s for s in subGraphs])
raise err
ind = 0
for name1, name2 in zip(names1, names2):
if name1 != name2:
ind -= 1
break
ind += 1
loopSegNames = names1[ind:] + names2[:ind-1:-1]
if removeLoops:
#loops.append(loopSegNames)
warn('Loop removal is not implemented yet.\nLoop detected',
'->'.join(loopSegNames))
else:
pass
# warn('Loop detected', '->'.join(loopSegNames))
subGraphs.append(subGraph)
# sort the subgraphs so that the largest is first
if isinstance(checkObjects[0], Segment):
checkType = 'Segment'
subGraphs.sort(key=lambda x: sum([len(y.compartments) for y in x]))
elif isinstance(checkObjects[0], Compartment):
checkType = 'Compartment'
subGraphs.sort(key=lambda x: len(x))
else:
raise RuntimeError("Can't sort type: %s" % str(type(checkObjects[0])))
if debugInfo:
print('Number of subgraphs = %d / size of graphs: %s'
% (len(subGraphs), str([len(graph) for graph in subGraphs])))
if removeDisconnected and len(subGraphs) > 1:
badGraphs, subGraphs = subGraphs[:-1], subGraphs[-1]
badSegs = set()
badComps = set()
badNodes = set()
if checkType == 'Segment':
while badGraphs:
subGraph = badGraphs.pop()
# find unwanted objects in the subgraph
badSegs.update(subGraph)
for seg in subGraph:
badComps.update(seg.compartments)
badNodes.update(seg.nodes)
else:
while badGraphs:
subGraph = badGraphs.pop()
# find unwanted objects in the subgraph
badComps.update(subGraph)
for comp in subGraph:
badNodes.update(comp.nodes)
badSegs.add(comp.segment)
self.segments[:] = \
[seg for seg in self.segments if seg not in badSegs]
self.compartments[:] = \
[comp for comp in self.compartments if comp not in badComps]
self.nodes[:] = [node for node in self.nodes if node not in badNodes]
self.branches = []
if self._somaBranch is not None:
self._somaBranch[0].neighbors = []
checkHash = hash(str(checkObjects)+str(removeDisconnected))
print("Removed all but largest subgraphs")
# record that the connectivity is already checked
self._connectivityChecked.add(checkHash)
return subGraphs
def _plotShollGraph(self, distances):
"""
Plot the number of neurites at a given distance
"""
# neuriteDistance starts at zero, and has two data points for each
# distance: one with the previous (running) number of compartments, and one
# with the change added in (running +1 or -1)
runningNum = 0
neuriteDistance = [0.0]
numIntersections = [0]
lastNeuriteDistance = 0.0
for d in distances:
if d[0] > lastNeuriteDistance:
neuriteDistance.append(lastNeuriteDistance)
numIntersections.append(runningNum)
neuriteDistance.append(d[0])
numIntersections.append(runningNum)
lastNeuriteDistance = d[0]
runningNum += d[1]
neuriteDistance.append(lastNeuriteDistance)
numIntersections.append(runningNum)
fig = pyplot.figure()
pyplot.plot(neuriteDistance, numIntersections, 'k-')
pyplot.title('Sholl Analysis', fontsize=22)
pyplot.xlabel('Distance from soma', fontsize=22)
pyplot.ylabel('Number of compartments', fontsize=22)
ax = pyplot.gca()
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(16)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(16)
pyplot.tight_layout()
def shollAnalysis(self, straightenNeurites=True):
"""
Find the number of neurites that intersect a sphere of a given radius
"""
# get the centroid of the soma, weighting each compartment's contribution
# by volume
# define how distance from centroid to compartment is measured
if straightenNeurites:
# get distance traveled along neurites (e.g. as though neuron was
# straightened out
centroid = self.soma.centroidPosition(mandateTag='Soma')
# compute distance from soma to each segment
somaPaths = PathDistanceFinder(self, self.soma, centroid)
# store results in an array
distances = []
for s in self.segments:
d0, d1 = somaPaths.distanceTo(s, 0.0), somaPaths.distanceTo(s, 1.0)
if d1 < d0:
d0, d1 = d1, d0
distances.append((d0, 1))
distances.append((d1, -1))
else:
# get euclidean distance from soma centroid to each compartment
# (must be done compartment by compartment, because segments curve)
centroid = self.soma.centroid(mandateTag='Soma')
# define distance from centroid to compartment
def _centroidDist(c):
def _tupleDist(_t1, _t2):
return sqrt( (_t1[0] - _t2[0])**2 + \
(_t1[1] - _t2[1])**2 + \
(_t1[2] - _t2[2])**2 )
d0 = _tupleDist(centroid, (c.x0, c.y0, c.z0))
d1 = _tupleDist(centroid, (c.x1, c.y1, c.z1))
if d0 <= d1:
return d0, d1
else:
return d1, d0
# compute the distance from the soma centroid to each compartment that's
# not in the soma
distances = []
for c in self.compartments:
if 'Soma' in c.tags:
continue
d0, d1 = _centroidDist(c)
distances.append((d0, 1))
distances.append((d1, -1))
# sort the distances by increasing distance
distances.sort(key=lambda x: x[0])
self._plotShollGraph(distances)
def _addSegment(self, name, segList=None):
"""
Add a new segment to the model
"""
if name in self.tags:
raise IOError('Tried to create segment with existing name/tag')
newSeg = Segment(self)
newSeg.name = name
if name not in self.tags:
self.tags[name] = 0
if segList is None:
segList = self.segments
segList.append(newSeg)
return newSeg
def _addNode(self, segment, _x, _y, _z, _r1, _r2=None, _r3=None,
_theta = 0.0, _phi=0.0):
"""
Define and add a new node to the model within the specified segment
"""
newNode = Node(_x, _y, _z, _r1, _r2, _r3, _theta, _phi)
newNode.segments.append(segment)
newNode.tags.update(segment.tags)
newNode.tags.add(segment.name)
self.nodes.append(newNode)
segment.nodes.append(newNode)
return newNode
def _addCompartment(self, segment, node0, node1=None, append=False):
"""
Define and add compartment to geometry within specified segment
"""
if type(node0) is int:
# passed an index, get the node object
node0 = self.nodes[node0]
if node1 is None:
# define one-node compartment and add it to the segment
newComp = OneNodeCompartment(node0)
node0.compartments.append(newComp)
if node0 == segment.nodes[0]:
segment.compartments.insert(0, newComp)
else:
assert node0 == segment.nodes[-1]
segment.compartments.append(newComp)
else:
# define two-node compartment and add it to the segment
if type(node1) is int:
node1 = self.nodes[node1]
newComp = TwoNodeCompartment(node0, node1)
node0.compartments.append(newComp)
node1.compartments.append(newComp)
      if append or node1 == segment.nodes[-1]:
segment.compartments.append(newComp)
else:
compInd = segment.nodes.index(node0)
if len(segment.compartments) and \
isinstance(segment.compartments[0], OneNodeCompartment):
compInd += 1
segment.compartments.insert(compInd, newComp)
# add segment information to compartment
newComp.tags.update(segment.tags)
newComp.tags.add(segment.name)
newComp.segment = segment
# add compartment to geometry
self.compartments.append(newComp)
# update tag counts
self.tags['*'] += 1
for tag in newComp.tags:
self.tags[tag] += 1
# update geometry totals
self.surfaceArea += newComp.surfaceArea
self.volume += newComp.volume
return newComp
def _connectSegments(self, segment0, location0, segment1, location1,
implicitConnect=True):
"""
Connect two segments in the geometry, managing the connecting info in the
segments and their nodes and compartments
if implicitConnect is set to True:
if A connects to B at position (x,y,z)
AND _connectSegments is called to connect A and C and (x,y,z):
then ensure that A, B, and C are all connected at (x,y,z)
"""
node0 = segment0.nodeAt(location0)
node1 = segment1.nodeAt(location1)
# check to make sure the two nodes are identical
try:
assert (node0.x, node0.y, node0.z, node0.r1) == \
(node1.x, node1.y, node1.z, node1.r1), \
'Tried to connect %s (at %f) and %s (at %f) but connecting nodes ' \
% (segment0.name, location0, segment1.name, location1 ) \
+ 'were different'
except AssertionError:
# buggy .hoc file with non-matching connecting nodes
# check to see if there is a location where these segments COULD connect
# find location where segment0 and segment1 connect
numMatches = 0
oldLoc0, oldLoc1 = location0, location1
node0, location0, node1, location1 = None, None, None, None
if not segment0.nodeLocations:
segment0._setNodeLocations()
if not segment1.nodeLocations:
segment1._setNodeLocations()
for n0, l0 in zip(segment0.nodes, segment0.nodeLocations):
for n1, l1 in zip(segment1.nodes, segment1.nodeLocations):
if (n0.x, n0.y, n0.z, n0.r1) == (n1.x, n1.y, n1.z, n1.r1):
numMatches += 1
node0, location0, node1, location1 = n0, l0, n1, l1
if numMatches == 0:
# no valid connection
print(len(segment0.nodeLocations), len(segment1.nodeLocations))
print('No way to connect %s and %s' % (segment0.name, segment1.name))
raise
      elif numMatches > 1:
        # ambiguous as to where to connect
        print('Multiple possible connections between %s and %s, giving up'
              % (segment0.name, segment1.name))
        raise
#else:
#warn('incorrect connection location for %s (at %.1f) and %s (at %.1f)'
# % (segment0.name, oldLoc0, segment1.name, oldLoc1),
# 'actual connection at %.1f and %.1f' % (location0, location1))
def _replaceNode(_oldNode, _replacementNode):
"""
swap _oldNode with _replacementNode in all objects that use _oldNode
"""
# swap nodes in all relevant segments
#_replacementNode.segments.extend(_oldNode.segments)
for _seg in _oldNode.segments:
_ind = _seg.nodes.index(_oldNode)
_seg.nodes[_ind] = _replacementNode
if _seg not in _replacementNode.segments:
_replacementNode.segments.append(_seg)
for _nInd in range(len(_seg.neighbors)):
_loc, _nLoc, _node = _seg.neighborLocations[_nInd]
if _node == _oldNode:
_seg.neighborLocations[_nInd] = (_loc, _nLoc, _replacementNode)
# swap nodes in all relevant compartments
#_replacementNode.compartments.extend(_oldNode.compartments)
for _comp in _oldNode.compartments:
_ind = _comp.nodes.index(_oldNode)
_comp.nodes[_ind] = _replacementNode
if _comp not in _replacementNode.compartments:
_replacementNode.compartments.append(_comp)
# remove node1 from geometry
self._removeNodes.add(_oldNode)
if implicitConnect:
# loop through neighbors of segment0 at location0
for nSeg, (loc, nLoc, node) in zip(segment0.neighbors[:],
segment0.neighborLocations[:]):
if node == node0:
assert loc == location0
if loc == location0:
if nSeg == segment1:
assert nLoc != location1, \
'Tried to connect %s and %s, which are already connected' \
% (segment0.name, segment1.name)
# segment0 (at location0) connects to segment1 in two locations!
# location1 (the new connection), and
# nLoc (an older connection)
assert node == node0, 'Weird node mismatch near loop segment'
          # this is unnecessary if the assertion is true (it should be):
# _replaceNode(node, node0)
# make nSeg neighbors with segment1
_makeNeighbors(segment1, nSeg, location1, nLoc, node0,
checkDuplicate=True)
# make nSeg neighbors with all of segment1's neighbors at loc1
for nSegN, (locN, nLocN, nodeN) in zip(segment1.neighbors[:],
segment1.neighborLocations[:]):
if nodeN != node1:
continue
assert locN == location1
_makeNeighbors(nSeg, nSegN, loc, nLocN, node0)
for nSeg, (loc, nLoc, node) in zip(segment1.neighbors[:],
segment1.neighborLocations[:]):
if node == node1:
assert loc == location1
if loc == location1:
if nSeg == segment0:
assert nLoc != location0, \
'Tried to connect %s and %s, which are already connected' \
% (segment0.name, segment1.name)
# segment1 (at location1) connects to segment0 in two locations!
# location0 (the new connection), and
# nLoc (an older connection)
assert node == node1, 'Weird node mismatch near loop segment'
          # this is unnecessary if the assertion is true (it should be):
#_replaceNode(node, node0)
# make nSeg neighbors with segment0
_makeNeighbors(segment0, nSeg, location0, nLoc, node0,
checkDuplicate=True)
# make segment0 and segment1 neighbors, with node0 as the connecting node
_makeNeighbors(segment0, segment1, location0, location1, node0)
# remove node1 from the geometry and replace it everywhere with node0
_replaceNode(node1, node0)
def _mergeSegments(self, segmentA, segmentB, _segList):
# Merge segmentA and segmentB into one segment, preserving their neighbor
# information
#
assert segmentA in _segList
assert segmentB in _segList
assert segmentB in segmentA.neighbors and \
segmentA in segmentB.neighbors, \
"Merged segments must be neighbors"
# find the connection location
ind = segmentA.neighbors.index(segmentB)
(locationA, locationB, nodeAB) = segmentA.neighborLocations[ind]
assert locationA in [0.0, 1.0] and locationB in [0.0, 1.0], \
"Tried to merge with locationA=%g, locationB=%g, but both locations must be an end point" \
% (locationA, locationB)
if locationA == 1.0:
if locationB == 0.0:
# add segmentB compartments + nodes to segmentA
segmentA.compartments += segmentB.compartments
segmentA.nodes += segmentB.nodes[1:]
else: #locationB == 1.0:
# add segmentB compartments + nodes to segmentA
segmentA.compartments += reversed(segmentB.compartments)
segmentA.nodes += reversed(segmentB.nodes[:-1])
else:
if locationB == 0.0:
# add segmentB compartments + nodes to segmentA
segmentA.compartments = list(reversed(segmentB.compartments)) \
+ segmentA.compartments
segmentA.nodes = list(reversed(segmentB.nodes[1:])) \
+ segmentA.nodes
else: #locationB == 1.0:
# add segmentB compartments + nodes to segmentA
segmentA.compartments = segmentB.compartments + segmentA.compartments
segmentA.nodes = segmentB.nodes[:-1] + segmentA.nodes
# update node locations
segmentA._setNodeLocations()
# remove segmentB from segmentA's neighbors
_removeNeighbor(segmentA, segmentB)
# remove segmentA from segmentB's neighbors
_removeNeighbor(segmentB, segmentA)
# recompute the location of segmentA's old neighbors
for ind in range(len(segmentA.neighbors)):
neighbor = segmentA.neighbors[ind]
loc, nLoc, node = segmentA.neighborLocations[ind]
nodeInd = segmentA.nodes.index(node)
newLoc = segmentA.nodeLocations[nodeInd]
# update the connection location in segmentA
segmentA.neighborLocations[ind] = (newLoc, nLoc, node)
# update the connection location in the neighbor
      nInd = list(zip(neighbor.neighbors,
                      neighbor.neighborLocations)).index((segmentA,
                                                          (nLoc, loc, node)))
      neighbor.neighborLocations[nInd] = (nLoc, newLoc, node)
# replace segmentB with segmentA in other segments' neighbors
# and add those neighbors as new neighbors to segmentA
for neighbor, (loc, nLoc, node) in zip(segmentB.neighbors,
segmentB.neighborLocations):
# update the location
      nInd = list(zip(neighbor.neighbors,
                      neighbor.neighborLocations)).index((segmentB,
                                                          (nLoc, loc, node)))
nodeInd = segmentA.nodes.index(node)
newLoc = segmentA.nodeLocations[nodeInd]
# make segmentB no longer neighbor's neighbor
#_removeNeighbor(neighbor, segmentB)
# make segmentA and neighbor neighbors
#_makeNeighbors(segmentA, neighbor, newLoc, nLoc, node)
# replace segmentB by segmentA in neighbor.neighbors, at new location
neighbor.neighbors[nInd] = segmentA
neighbor.neighborLocations[nInd] = (nLoc, newLoc, node)
# add neighbor to segmentA's list of neighbors
segmentA.neighbors.append(neighbor)
segmentA.neighborLocations.append((newLoc, nLoc, node))
# update segmentB's tags into segmentA
segmentA.tags.update(segmentB.tags)
segmentA.tags.add(segmentB.name)
# remove segmentB from the list of segments
#[SLOW]
#_segList.remove(segmentB)
self._removeSegments.add(segmentB)
def mergeBranchesByDistanceToEdge(self, makePlots=True):
if not hasattr(self.soma, 'centripetalOrder') or \
self.soma.centripetalOrder is None:
self.calcBranchOrder(doPlot=False)
def _getMergePath(startB, considerBranches):
startLen = startB.length
if startB.branchOrder == 0:
startLen *= 0.5
finishedPaths = []
# start at startB
openPaths = [([startB], startLen)]
# find all the paths from startB to a neurite tip
while openPaths:
path, length = openPaths.pop()
current = path[-1] ; nextOrder = current.branchOrder + 1
nextBranches = [n for n in current.neighbors
if n.branchOrder == nextOrder]
if nextBranches:
for n in nextBranches:
openPaths.append((path + [n], length + n.length))
else:
finishedPaths.append((path, length))
# return the longest path
return max(finishedPaths, key=lambda x:x[1])[0]
self.mergedBranches = []
considerBranches = {b for b in self.branches}
while considerBranches:
minOrder = min(b.branchOrder for b in considerBranches)
startB = [b for b in considerBranches
if b.branchOrder == minOrder].pop()
mergePath = _getMergePath(startB, considerBranches)
merged = Segment(self)
merged.name = 'MergedBranch%d' % len(self.mergedBranches)
# add the compartments, nodes, tags, and branch order
merged.compartments = startB.compartments[:]
merged.nodes = startB.nodes[:]
merged.tags = {t for t in startB.tags}
merged.branchOrder = startB.branchOrder
merged.centripetalOrder = startB.centripetalOrder
# add .merged element to startB
startB.merged = merged
considerBranches.remove(startB)
previous = startB
for b in mergePath[1:]:
# add the compartments, nodes, and tags
merged.compartments += [TwoNodeCompartment(previous.nodes[-1],
b.nodes[0])]
merged.compartments += b.compartments
merged.nodes += b.nodes
merged.tags.update(b.tags)
# add .merged element to b
b.merged = merged
considerBranches.remove(b)
previous = b
# set nodeLocations in merged
merged._setNodeLocations()
# add merged to mergedBranches
self.mergedBranches.append(merged)
# find the neighbors and neighborLocations of the merged branches
for branch in self.branches:
merged = branch.merged
for n, (loc, nLoc, node) in zip(branch.neighbors,
branch.neighborLocations):
if n == branch:
# this is a loop
assert (loc == 0 and nLoc == 1) or (loc == 1 and nLoc == 0)
merged.neighbors.append(merged)
merged.neighborLocations.append((loc, nLoc, node))
else:
nMerged = n.merged
if nMerged == merged:
# both branches are part of merged, so no neighbor need be added
continue
# find the location of the neighbors on each merged branch
mergedLoc = merged.nodeLocations[merged.nodes.index(node)]
nMergedLoc = nMerged.nodeLocations[nMerged.nodes.index(node)]
# make them neighbors
_makeNeighbors(merged, nMerged, mergedLoc, nMergedLoc, node)
if makePlots:
self._plotBranchOrderStatistics()
def mergeBranchesByOrder(self, makePlots=True):
# merge branches together if one of a branch's neighbors is clearly a
# continuation of it (using centripetalOrder as method of determining
# continuation)
if not hasattr(self.soma, 'centripetalOrder') or \
self.soma.centripetalOrder is None:
self.calcBranchOrder(doPlot=False)
def _getMergeBranch(currentBranch):
# find the branch that should be merged with current branch
# and what the order of the merged branch should be
# if no branch should be merged, return None
orders0 = {}
orders1 = {}
mergeBranch = None
mergeCentripetalOrder = currentBranch.centripetalOrder + 1
mergeBranchOrder = currentBranch.branchOrder - 1
for n, (locC, locN, nodeC) in zip(currentBranch.neighbors,
currentBranch.neighborLocations):
assert locC in [0, 1] and locN in [0, 1]
#if locC not in [0, 1] or locN not in [0, 1]:
# # only merge at branch end points
# continue
if n.centripetalOrder == mergeCentripetalOrder and \
n.branchOrder == mergeBranchOrder:
if mergeBranch is not None:
# too many potential merges => this branch can't merge
return None
else:
# potential merge
mergeBranch = n
if mergeBranch is not None:
# check to make sure no other branch can merge onto mergeBranch:
for n in mergeBranch.neighbors:
if n != currentBranch and n.branchOrder == currentBranch.branchOrder\
and n.centripetalOrder == currentBranch.centripetalOrder:
# another branch could equally merge onto mergeBranch, which means
# that no merge can be preferred
return None
return mergeBranch
self.mergedBranches = []
visited = {b : False for b in self.branches}
openBranches = {b for b in self.branches if b.centripetalOrder == 0}
# find the new merged branches
while openBranches:
# get the next branch that could start a merged branch
current = openBranches.pop()
visited[current] = True
# mark all the neighbors as open branches
#openBranches.update(n for n in current.neighbors if not visited[n])
# create the merged branch that will hold current and anything that
# continues it
merged = Segment(self)
merged.name = 'MergedBranch%d' % len(self.mergedBranches)
# add the compartments, nodes, tags, and branch order
merged.compartments = current.compartments[:]
merged.nodes = current.nodes[:]
merged.tags = {t for t in current.tags}
merged.branchOrder = current.branchOrder
merged.centripetalOrder = current.centripetalOrder
# add .merged element to current
current.merged = merged
barf = (current.name, current.branchOrder, current.centripetalOrder)
# find the continuation, if any
current = _getMergeBranch(current)
while current is not None:
# don't allow any other branches to use current branch
openBranches.discard(current)
if visited[current]:
print('%s with tags: %s' % (current.name,
' '.join(t for t in current.tags)))
print(current.branchOrder, current.centripetalOrder)
for n in current.neighbors:
print('%s with tags: %s' % (n.name,
' '.join(t for t in n.tags)))
print(n.branchOrder, n.centripetalOrder)
print(barf)
if current.merged is not None:
print(current.merged.name)
for b in self.branches:
if hasattr(b, 'merged') and b.merged == current.merged:
print(b.name)
else:
print('None')
visited[current] = True
# add the compartments, nodes, and tags
merged.compartments[0:0] = [TwoNodeCompartment(current.nodes[-1],
merged.nodes[0])]
merged.compartments[0:0] = current.compartments
merged.nodes[0:0] = current.nodes
merged.tags.update(current.tags)
merged.branchOrder = current.branchOrder
merged.centripetalOrder = current.centripetalOrder
# add .merged element to current
current.merged = merged
# find the continuation, if any
barf = (current.name, current.branchOrder, current.centripetalOrder)
current = _getMergeBranch(current)
# set nodeLocations in merged
merged._setNodeLocations()
# add merged to mergedBranches
self.mergedBranches.append(merged)
# add any missed branches
if not openBranches:
try:
minUnvisitedOrder = min(b.centripetalOrder for b in self.branches
if not visited[b])
openBranches = {b for b in self.branches if not visited[b] and
b.centripetalOrder == minUnvisitedOrder}
        except ValueError:
          openBranches = set()
# find the neighbors and neighborLocations of the merged branches
for branch in self.branches:
merged = branch.merged
for n, (loc, nLoc, node) in zip(branch.neighbors,
branch.neighborLocations):
if n == branch:
# this is a loop
assert (loc == 0 and nLoc == 1) or (loc == 1 and nLoc == 0)
merged.neighbors.append(merged)
merged.neighborLocations.append((loc, nLoc, node))
else:
nMerged = n.merged
if nMerged == merged:
# both branches are part of merged, so no neighbor need be added
continue
# find the location of the neighbors on each merged branch
mergedLoc = merged.nodeLocations[merged.nodes.index(node)]
nMergedLoc = nMerged.nodeLocations[nMerged.nodes.index(node)]
# make them neighbors
_makeNeighbors(merged, nMerged, mergedLoc, nMergedLoc, node)
if makePlots:
self._plotBranchOrderStatistics()
def _addOneNodeCompartments(self):
"""
Add extra compartments to account for regions covered by nodes with only
one compartment attached (the end of the soma is probably the most
important)
"""
    for segment in self.segments:
      if not segment.nodes:
        # empty segment
        warn("Warning, empty segment: %s" % segment.name)
        continue
node0 = segment.nodes[0]
if len(node0.compartments) == 1:
self._addCompartment(segment, node0)
node1 = segment.nodes[-1]
if len(node1.compartments) == 1:
self._addCompartment(segment, node1)
def _getBranch(self, branchStart):
"""
self._getBranch(branchStart)
branchStart: starting compartment
returns branch, neighbors0, neighbors1
branch: a Segment with 0 or >= 2 neighbors at each endpoint, and no
neighbors in the middle
neighbors0, neighbors1: list of neighbors at 0 and 1 end respectively
each neighbor is a tuple (segment, position, compartment)
segment: the neighbor segment
position: the position (in the neighbor segment) of the connection
compartment: the neighbor compartment
    Find the branch (bounded by compartments with more than two neighbors)
    that starts with branchStart. The end of each branch is a node with
    a number of attached compartments not equal to 2.
NOTE: must add compartment-by-compartment (instead of by segments) because
segments can in principle have neighbors midway instead of at endpoints
NOTE: don't add _neighbors0 or _neighbors1 to _branch.neighbors, because
they are *Compartment* neighbors, and _branch will want *branch*
neighbors
"""
assert isinstance(branchStart, Compartment), \
'Must start a branch with a compartment'
# determine starting segment, position of starting compartment in segment
startSeg = branchStart.segment
startPos = branchStart.length / 2.0
for c in startSeg.compartments:
if c == branchStart:
break
startPos += c.length
# create the branch
branch = Segment(self)
branch.name = 'branch%d' % len(self.branches)
branch.tags = {t for t in startSeg.tags}
branch.tags.add(startSeg.name)
def _getBranchNeighbors(segment, startPos):
neighbors0, neighbors1 = [], []
pos0, nPos0, pos1, nPos1 = 0, None, 1, None
for neighbor, (location, nLocation, node) in zip(segment.neighbors,
segment.neighborLocations):
nComps = [c for c in node.compartments if c in neighbor.compartments]
# this assertion fails when a segment is its own neighbor:
#assert len(nComps) == (2 - (nLocation in [0, 1])), \
# 'Found %d neighbor compartments at %f' % (len(nComps), nLocation)
if location == 0:
neighbors0.append((neighbor, nLocation, nComps[0]))
if nLocation not in [0, 1]:
neighbors0.append((neighbor, nLocation, nComps[-1]))
elif location == 1:
neighbors1.append((neighbor, nLocation, nComps[0]))
if nLocation not in [0, 1]:
neighbors1.append((neighbor, nLocation, nComps[-1]))
elif location < startPos:
assert nLocation in [0, 1]
if location > pos0:
pos0 = location
farComp = segment.compartmentAt(location, choice=0)
neighbors0 = [(neighbor, nLocation, nComps[0]),
(segment, location, farComp)]
elif location == pos0:
farComp = segment.compartmentAt(location, choice=0)
neighbors0 += [(neighbor, nLocation, nComps[0]),
(segment, location, farComp)]
else:
assert nLocation in [0, 1]
if location < pos1:
pos1 = location
farComp = segment.compartmentAt(location, choice=1)
neighbors1 = [(neighbor, nLocation, nComps[0]),
(segment, location, farComp)]
else:
farComp = segment.compartmentAt(location, choice=1)
neighbors1 += [(neighbor, nLocation, nComps[0]),
(segment, location, farComp)]
return neighbors0, pos0, neighbors1, pos1
    def _getBranchPart(segment, pos0, pos1):
      # return the compartments and nodes in segment between pos0 and pos1
      if pos0 == 0 and pos1 == 1:
        return segment.compartments[:], segment.nodes[:]
      if not segment.nodeLocations:
        segment._setNodeLocations()
      n0 = bisect_left(segment.nodeLocations, pos0)
      assert segment.nodeLocations[n0] == pos0
      n1 = bisect_left(segment.nodeLocations, pos1)
      assert segment.nodeLocations[n1] == pos1
      return segment.compartments[n0:n1], segment.nodes[n0:n1 + 1]
# find neighbors of startSeg
neighbors0, pos0, neighbors1, pos1 = \
_getBranchNeighbors(startSeg, startPos)
# get the part of startSeg between pos0 and pos1
branch.compartments, branch.nodes = _getBranchPart(startSeg, pos0, pos1)
while len(neighbors0) == 1:
# extend branch in 0 direction
# get the neighbor segment
segment, nPos0, nComp = neighbors0.pop()
# add the segment's name to branch tags
branch.tags.add(segment.name)
if nPos0 == 1:
# normal case, the neighbor is oriented in the same direction
# get the new neighbors0
neighbors0, pos0, nDummy, posDummy = _getBranchNeighbors(segment, 1)
# update branch with neighbor's segment, removing duplicate node
branch.compartments[0:0], branch.nodes[0:1] = \
_getBranchPart(segment, pos0, 1)
else:
# the neighbor is oriented backwards relative to startSeg
# get the new neighbors0
nDummy, posDummy, neighbors0, pos0 = _getBranchNeighbors(segment, 0)
# update branch with neighbor's segment, removing duplicate node
branch.compartments[0:0], branch.nodes[0:1] = (
reversed(L) for L in _getBranchPart(segment, 0, pos0))
while len(neighbors1) == 1:
# extend branch in 1 direction
# get the neighbor segment
segment, nPos1, nComp = neighbors1.pop()
# add the segment's name to branch tags
branch.tags.add(segment.name)
if nPos1 == 0:
# normal case, the neighbor is oriented in the same direction
# get the new neighbors1
nDummy, posDummy, neighbors1, pos1 = _getBranchNeighbors(segment, 0)
# update branch with neighbor's segment, removing duplicate node
numC, numN = len(branch.compartments), len(branch.nodes)
branch.compartments[numC:], branch.nodes[numN-1:] = \
_getBranchPart(segment, 0, pos1)
else:
# the neighbor is oriented backwards relative to startSeg
# get the new neighbors1
neighbors1, pos1, nDummy, posDummy = _getBranchNeighbors(segment, 1)
# update branch with neighbor's segment, removing duplicate node
numC, numN = len(branch.compartments), len(branch.nodes)
branch.compartments[numC:], branch.nodes[numN-1:] = (
reversed(L) for L in _getBranchPart(segment, pos1, 1))
return branch, neighbors0, neighbors1
@property
def soma(self):
if self._soma is None:
self._findSoma()
return self._soma
@property
def somaBranch(self):
if self._somaBranch is None:
self._findSoma()
return self._somaBranch[0]
def _findSoma(self):
"""
Find the compartment with the largest diameter (which is presumably in the
soma). Return the segment or branch that contains this compartment.
"""
    # locate the largest-radius compartment - it should be the soma
somaCenter = max(self.compartments, key=lambda c: c.maxRadius)
# get the segment that contains the soma (plus extra stuff)
soma = somaCenter.segment
self._soma = soma
# add the Soma tag to the segment, but not its compartments
soma.tags.add('Soma')
# find a cutoff on radius that defines the Soma proper
rCutoff = (soma.minRadius * soma.maxRadius**3)**0.25
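    # (a geometric mean of min and max radius, weighted 3:1 toward the max)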
# find the index to the center of the Soma
centerInd = soma.compartments.index(somaCenter)
# apply the Soma tag to contiguous stretch of large compartments near
# the center
self.tags['Soma'] = 0
for ind in range(centerInd, len(soma.compartments)):
c = soma.compartments[ind]
if c.avgRadius >= rCutoff:
c.tags.add('Soma')
self.tags['Soma'] += 1
else:
break
for ind in range(centerInd - 1, -1, -1):
c = soma.compartments[ind]
if c.avgRadius >= rCutoff:
c.tags.add('Soma')
self.tags['Soma'] += 1
else:
break
self._somaBranch = self._getBranch(somaCenter)
def findAxons(self, findBranch=False, debugInfo=False, minLength=10,
edgeSafety=2.0):
"""
Locate any axon branches
To be an axon branch, it must:
1. be terminal, but not contain the Soma
    2. be longer than minLength
3. have its terminal node less than edgeSafety * node radius from the
furthest extent of the neuron in x, y, or z
"""
for segment in self.segments:
if len(segment.nodes) != len(segment.compartments) + 1:
raise AssertionError('%s has %d nodes and %d compartments'
% (segment.name, len(segment.nodes), len(segment.compartments)))
if self._axons is not None:
# already found the axons, return the results
if findBranch:
return self._axonBranches
else:
return self._axons
# need the Soma to be tagged
somaBranch = self.somaBranch
# need to know the range of the whole geometry
self._findGeometryRange()
# need the branches, because one criterion for axons is length, and
# one axon may be made up of many consecutive segments
self.findBranches()
def _isEdgeNode(_node, _safety):
_radius = _node.avgRadius
def _edgeCoord(_coord, _dim):
return _coord - _safety * _radius < self.minRange[_dim] or \
_coord + _safety * _radius > self.maxRange[_dim]
return _edgeCoord(_node.x, 0) or \
_edgeCoord(_node.y, 1) or \
_edgeCoord(_node.z, 2)
if debugInfo:
print('Edges: %6.1f %6.1f %6.1f' % tuple(self.minRange))
print(' %6.1f %6.1f %6.1f' % tuple(self.maxRange))
self._axons = list()
self._axonBranches = list()
for branch in self.branches:
if branch == somaBranch:
# it's the Soma
continue
if branch.length < minLength:
# too short to be sure it's an axon
continue
n0 = branch.neighborsAt(0)
n1 = branch.neighborsAt(1)
if n0 and n1:
# not terminal
continue
if debugInfo:
print('Possible axon: %s (%s)' % (branch.name,
' '.join(t for t in branch.tags)))
if not n0:
print('\t%d neighbors at 0.0' % len(n0))
node = branch.nodeAt(0)
print('\t node at 0.0: %.1f, %.1f, %.1f, %f' %
(node.x, node.y, node.z, node.r1))
if not n1:
print('\t%d neighbors at 1.0' % len(n1))
node = branch.nodeAt(1)
print('\t node at 1.0: %.1f, %.1f, %.1f, %f' %
(node.x, node.y, node.z, node.r1))
if ((not n0 and _isEdgeNode(branch.nodes[0], edgeSafety)) or
(not n1 and _isEdgeNode(branch.nodes[-1], edgeSafety))):
branch.addTag('Axon')
self._axonBranches.append(branch)
segments = {c.segment for c in branch.compartments}
if debugInfo:
print('Found axon with segments %s' %
' '.join(s.name for s in segments))
for s in segments:
s.addTag('Axon')
if s.isTerminal:
self._axons.append(s)
if findBranch:
return self._axonBranches
else:
return self._axons
def _findGeometryRange(self):
"""
Find the physical extent of the geometry in x,y,z
"""
if (not any(isnan(x) for x in self.minRange) and
not any(isnan(x) for x in self.maxRange)):
# the range has already been found/specified
return
self.minRange = [float('inf'), float('inf'), float('inf')]
self.maxRange = [float('-inf'), float('-inf'), float('-inf')]
def _updateRange(_nodeCoord, _rangeInd):
if _nodeCoord < self.minRange[_rangeInd]:
self.minRange[_rangeInd] = _nodeCoord
if _nodeCoord > self.maxRange[_rangeInd]:
self.maxRange[_rangeInd] = _nodeCoord
for _node in self.nodes:
_updateRange(_node.x, 0)
_updateRange(_node.y, 1)
_updateRange(_node.z, 2)
def _makeNeighbors(segment1, segment2, location1, location2, node,
checkDuplicate=False):
# make segment1 and segment2 neighbors
if checkDuplicate:
preExist = any(neighbor == segment2
and (loc, nLoc, n) == (location1, location2, node)
for neighbor, (loc, nLoc, n) in zip(segment1.neighbors,
segment1.neighborLocations))
if not preExist:
segment1.neighbors.append(segment2)
segment1.neighborLocations.append((location1, location2, node))
preExist = any(neighbor == segment1
and (loc, nLoc, n) == (location2, location1, node)
for neighbor, (loc, nLoc, n) in zip(segment2.neighbors,
segment2.neighborLocations))
if not preExist:
segment2.neighbors.append(segment1)
segment2.neighborLocations.append((location2, location1, node))
else:
segment1.neighbors.append(segment2)
segment1.neighborLocations.append((location1, location2, node))
segment2.neighbors.append(segment1)
segment2.neighborLocations.append((location2, location1, node))
def _removeNeighbor(segment, neighbor):
# remove neighbor from list of segment's neighbors
ind = segment.neighbors.index(neighbor)
segment.neighbors.pop(ind)
segment.neighborLocations.pop(ind)
def getBranchAngle(segment, neighbor, segLoc, nLoc, node):
# calculate angle between segment and its neighbor
# 2-node segment progression (produced from xml knossos skeletons)
if len(segment.nodes) == 2 and len(neighbor.nodes) == 2:
if segLoc == 0:
segNode = segment.nodes[0]
elif segLoc == 1:
segNode = segment.nodes[-1]
else:
ind = segment.nodes.index(node)
      segNode = segment.nodes[ind]
if nLoc == 0:
nNode = neighbor.nodes[0]
elif nLoc == 1:
nNode = neighbor.nodes[1]
else:
ind = neighbor.nodes.index(node)
nNode = neighbor.nodes[ind]
# normal progression
else:
if segLoc == 0:
segNode = segment.nodes[1]
elif segLoc == 1:
segNode = segment.nodes[-2]
else:
ind = segment.nodes.index(node)
segNode = segment.nodes[ind-1]
if nLoc == 0:
nNode = neighbor.nodes[1]
elif nLoc == 1:
nNode = neighbor.nodes[-2]
else:
ind = neighbor.nodes.index(node)
nNode = neighbor.nodes[ind+1]
segVec = (node.x - segNode.x, node.y - segNode.y, node.z - segNode.z)
nVec = (nNode.x - node.x, nNode.y - node.y, nNode.z - node.z)
dot = sum(sC * nC for sC, nC in zip(segVec, nVec))
segMag = sum(sC * sC for sC in segVec)
nMag = sum(nC * nC for nC in nVec)
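  # cos(angle) = (segVec . nVec) / (|segVec| |nVec|); segMag and nMag are
  # squared magnitudes, hence the sqrt of their product below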
if len(segment.nodes) > 2:
#cosAngle = max(-1.0, min(1.0, dot / sqrt(segment.length)))
cosAngle = max(-1.0, min(1.0, dot / sqrt(segMag * nMag))) ########## MAJOR EDIT
else:
print('2-node segment')
cosAngle = max(-1.0, min(1.0, dot / sqrt(segment.length)))
try:
angle = (180/pi) * acos(cosAngle)
except ValueError:
print(dot, segMag, nMag)
print(dot / sqrt(segMag * nMag))
raise
return angle
class Segment:
def __init__(self, geometry):
self.geometry = geometry
self.name = None
self.tags = set()
self.compartments = []
self.nodes = []
self.neighbors = []
self.nodeLocations = []
# neighborLocations are tuples:
# (location in this segment [0.0 - 1.0],
# location in neighbor segment [0.0 - 1.0],
# connecting Node)
self.neighborLocations = []
#self.volume = None
self.branchOrder = None
def neighborsAt(self, location):
return [n for n, (loc, nLoc, node)
in zip(self.neighbors, self.neighborLocations) if loc == location]
  def _setNodeLocations(self):
    try:
      # prepend 0.0 so that nodeLocations[i] is the fractional position of
      # self.nodes[i] along the segment
      locs = [0.0] + list(cumsum(comp.length for comp in self.compartments))
      self.nodeLocations = [loc / locs[-1] for loc in locs]
    except (IndexError, ZeroDivisionError):
      self.nodeLocations = [0.0, 1.0]
def compartmentAt(self, location, choice=None):
if not self.nodeLocations:
self._setNodeLocations()
ind = bisect_left(self.nodeLocations, location)
if self.nodeLocations[ind] == location:
if choice == 0:
return self.compartments[ind - 1]
elif choice == 1:
return self.compartments[ind]
else:
assert self.nodeLocations[ind] != location, \
'Two compartments touch requested location'
else:
return self.compartments[ind - 1]
def nodeAt(self, location):
if not self.nodeLocations:
self._setNodeLocations()
ind = bisect_left(self.nodeLocations, location)
assert self.nodeLocations[ind] == location, 'No node at requested location'
return self.nodes[ind]
def coordAt(self, location):
if not self.nodeLocations:
self._setNodeLocations()
ind = bisect_left(self.nodeLocations, location)
if self.nodeLocations[ind] == location:
# exactly at a node:
n = self.nodes[ind]
return (n.x, n.y, n.z)
else:
# need to interpolate
i0 = ind - 1
n0 = self.nodes[i0]
n1 = self.nodes[ind]
cN0 = ( (self.nodeLocations[ind] - location) /
(self.nodeLocations[ind] - self.nodeLocations[i0]) )
cN1 = 1.0 - cN0
return (cN0 * n0.x + cN1 * n1.x, cN0 * n0.y + cN1 * n1.y,
cN0 * n0.z + cN1 * n1.z)
def clear(self):
if self.compartments:
for c in self.compartments:
for tag in c.tags:
self.geometry.tags[tag] -= 1
self.geometry.compartments = [c for c in self.geometry.compartments
if c not in self.compartments]
self.compartments = []
if self.nodes:
delNodes = []
for n in self.nodes:
n.segments.remove(self)
if not n.segments:
delNodes.append(n)
self.geometry.nodes = [n for n in self.geometry.nodes
if n not in delNodes]
self.nodes = []
def addTag(self, newTag):
"""
Add a new tag to the segment and all its compartments
"""
if newTag not in self.geometry.tags:
self.geometry.tags[newTag] = 0
self.tags.add(newTag)
for c in self.compartments:
c.tags.add(newTag)
self.geometry.tags[newTag] += 1
@property
def length(self):
return sum([c.length for c in self.compartments])
@property
def surfaceArea(self):
return sum([c.surfaceArea for c in self.compartments])
@property
def maxRadius(self):
# compute maximum radius
return max(c.maxRadius for c in self.compartments)
@property
def minRadius(self):
# compute minimum radius
return min(c.minRadius for c in self.compartments)
@property
def avgRadius(self):
# compute average radius, weighted by volume
return sum(c.avgRadius * c.volume for c in self.compartments) / \
sum(c.volume for c in self.compartments)
@property
def volume(self):
return sum(c.volume for c in self.compartments)
@property
def tortuosity(self):
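    # tortuosity = path length / straight-line distance between end nodes;
    # 1.0 for a perfectly straight segment, infinite for a closed loop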
n0 = self.nodes[0] ; n1 = self.nodes[-1]
if n0 == n1:
return float('inf')
euclideanD = sqrt((n0.x - n1.x)**2 + (n0.y - n1.y)**2 + (n0.z - n1.z)**2)
if euclideanD == 0:
return 1
else:
return self.length / euclideanD
@property
def isTerminal(self):
n0, n1 = False, False
for loc, nLoc, node in self.neighborLocations:
if loc == 0.0:
if n1:
return False
n0 = True
elif loc == 1.0:
if n0:
return False
n1 = True
return True
def lengthPerArea(self, _x1, _x2 = 0.5):
"""
Compute length per cross sectional area for segment
"""
    if _x2 < _x1:
      _x1, _x2 = _x2, _x1
_lengths = [c.length for c in self.compartments]
_cumLengths = list(cumsum(_lengths))
def _findComp(_l):
for n in range(len(_lengths)):
if _cumLengths[n] >= _l:
_c = self.compartments[n]
_x = 1.0 - (_cumLengths[n] - _l) / _c.length
return (n, _c, _x)
# find 1st compartment, and proportion of distance across c1
(_compInd1, _c1, _cX1) = _findComp(_x1 * _cumLengths[-1])
# find 2nd compartment, and proportion of distance across c2
(_compInd2, _c2, _cX2) = _findComp(_x2 * _cumLengths[-1])
# compute length per area across distance
if _compInd2 == _compInd1:
return _c1.lengthPerArea(_cX1, _cX2)
else:
lPA = _c1.lengthPerArea(_cX1, 1.0) + _c2.lengthPerArea(0.0, _cX2)
_compInd2 -= 1
while _compInd2 > _compInd1:
lPA += self.compartments[_compInd2].lengthPerArea(0.0, 1.0)
_compInd2 -= 1
return lPA
def centroid(self, mandateTag=None):
# return the centroid of a Segment, weighted by volume
def _weightedC(_c):
# return the weighted centroid of a compartment
return tuple(_t * _c.volume / v for _t in _c.centroid)
if mandateTag is None:
v = self.volume
centroids = [_weightedC(c) for c in self.compartments]
else:
v = sum(c.volume for c in self.compartments if mandateTag in c.tags)
centroids = [_weightedC(c) for c in self.compartments if \
mandateTag in c.tags]
# sum up the centroids
centroid = (0.0, 0.0, 0.0)
for pos in centroids:
centroid = tuple(a + b for a,b in zip(centroid, pos))
return centroid
def centroidPosition(self, mandateTag=None):
# return the position of the centroid of a Segment, weighted by volume
# (in this case, position is a number from 0 to 1, denoting proportion of
# distance along the segment)
if mandateTag is None:
halfV = self.volume / 2
segLen = self.length
centroidLen = 0.0
for c in self.compartments:
if c.volume < halfV:
halfV -= c.volume
centroidLen += c.length
else:
cFrac = halfV / c.volume
centroidLen += c.length * cFrac
break
else:
v = sum(c.volume for c in self.compartments if mandateTag in c.tags)
halfV = v / 2
segLen = self.length
centroidLen = 0.0
for c in self.compartments:
if mandateTag in c.tags:
if c.volume < halfV:
halfV -= c.volume
centroidLen += c.length
else:
cFrac = halfV / c.volume
centroidLen += c.length * cFrac
break
else:
centroidLen += c.length
# return the position of the centroid
return centroidLen / segLen
class Node:
def __init__(self, _x, _y, _z, _r1, \
_r2=None, _r3=None, _theta=0.0, _phi=0.0):
self.x = _x
self.y = _y
self.z = _z
self.r1 = _r1
if self.r1 <= 0.0:
if self.r1 < 0:
raise ValueError('Encountered negative radius')
else:
raise ValueError('Encountered radius=0')
if _r2 is None:
if _r3 is not None:
raise ValueError(\
'Specify 1 radius for spherical nodes, 3 for ellipsoidal nodes')
self.r2 = _r1
self.r3 = _r1
else:
self.r2 = _r2
self.r3 = _r3
self.theta = _theta # angle from z axis
self.phi = _phi # angle of azimuth (from x axis to semi-major axis)
self.surface_area = None
self.volume = None
self.compartments = []
self.segments = []
self.tags = set()
@property
def maxRadius(self):
    return max(self.r1, self.r2, self.r3)
@property
def minRadius(self):
return min(self.r1, self.r2, self.r3)
@property
def avgRadius(self):
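    # geometric mean of the three semi-axes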
return (self.r1 * self.r2 * self.r3)**(1.0/3.0)
def getElipse(self, node1):
"""
    Return details of the ellipse from intersection with the compartment defined by
connecting this node and node1
"""
if self.r2 != self.r1 or self.r3 != self.r1:
raise IOError('Currently cannot handle ellipsoidal nodes')
# unnecessary when dealing with spheres:
#(xAxis, yAxis, zAxis) = (node1.x - x, node1.y - y, node1.z - z)
return (self.r1, self.r1, 0.0, self.x, self.y, self.z)
class Compartment:
def __init__(self):
# do nothing, this is a pure virtual class
self._surfaceArea = None
self._volume = None
self._length = None
self.x0 = None
self.y0 = None
self.z0 = None
self.x1 = None
self.y1 = None
self.z1 = None
self.tags = set()
self.name = None
self.segment = None
self.nodes = None
@property
def neighbors(self):
"""
Find the compartment's neighbors
"""
raise RuntimeError('Compartment is a pure virtual class')
@property
def length(self):
if self._length is None:
self._length = self._calcLength()
return self._length
def _calcLength(self):
raise RuntimeError('Compartment is a pure virtual class')
@property
def surfaceArea(self):
if self._surfaceArea is None:
# compute surface area and convert from um^2 to mm^2
self._surfaceArea = 1.0e-6 * self._calcSurfaceArea()
return self._surfaceArea
def _calcSurfaceArea(self):
raise RuntimeError('Compartment is a pure virtual class')
@property
def volume(self):
if self._volume is None:
# compute volume and convert from um^3 to mm^3
self._volume = 1.0e-9 * self._calcVolume()
return self._volume
def _calcVolume(self):
raise RuntimeError('Compartment is a pure virtual class')
def lengthPerArea(self, _x1, _x2 = 0.5):
raise RuntimeError('Compartment is a pure virtual class')
@property
def maxRadius(self):
return max(n.maxRadius for n in self.nodes)
@property
def minRadius(self):
return min(n.minRadius for n in self.nodes)
@property
def avgRadius(self):
"""
Return an estimate of average radius, pretending compartment was a cylinder
or sphere (as appropriate to subclass)
"""
raise RuntimeError('Compartment is a pure virtual class')
@property
def centroid(self):
"""
return centroid of compartment as a tuple
"""
raise RuntimeError('Compartment is a pure virtual class')
class OneNodeCompartment(Compartment):
def __init__(self, node):
# init Compartment object
Compartment.__init__(self)
self.nodes = [node]
connectCompartment = node.compartments[0]
if connectCompartment.node0 == node:
node1 = connectCompartment.node1
else:
node1 = connectCompartment.node0
(self.semiMajor, self.semiMinor, self.theta, self.x0, self.y0, self.z0) = \
node.getElipse(node1)
_direction = (node.x - node1.x, node.y - node1.y, node.z - node1.z)
_norm = sqrt(sum([x*x for x in _direction]))
_scale = self.length / _norm
self.x1 = self.x0 + _scale * _direction[0]
self.y1 = self.y0 + _scale * _direction[1]
self.z1 = self.z0 + _scale * _direction[2]
_scale *= 3.0/8.0
self._centroid = (self.x0 + _scale * _direction[0], \
self.y0 + _scale * _direction[1], \
self.z0 + _scale * _direction[2])
@property
def node(self):
return self.nodes[0]
@property
def neighbors(self):
"""
    Return a list of this compartment's neighbors
"""
return [comp for comp in self.node.compartments if comp != self]
def _calcLength(self):
"""
compute and set length
"""
if self.node.r2 != self.node.r1 or self.node.r3 != self.node.r1:
raise IOError('Currently cannot handle ellipsoidal nodes')
return self.semiMajor
def _calcSurfaceArea(self):
"""
compute and set surface area
"""
if self.semiMajor == self.semiMinor:
return 4 * pi * self.semiMajor * self.semiMinor
else:
      a = self.node.r1
      b = self.node.r2
      c = self.node.r3
p = 1.6075
# approximate formula accurate to relative error of <= 1.061%
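      # (this is Knud Thomsen's approximation for ellipsoid surface area)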
return 4 * pi * ( ((a*b)**p + (a*c)**p + (b*c)**p)/3 )**(1/p)
def _calcVolume(self):
"""
compute and set volume
"""
return 4.0 * pi / 3.0 * self.node.r1 * self.node.r2 * self.node.r3
def lengthPerArea(self, _x1, _x2 = 0.5):
"""
compute and return length per area between relative positions _x1 and _x2
0 <= _x <= 1, _x2 defaults to 0.5
"""
if self.node.r2 != self.node.r1 or self.node.r3 != self.node.r1:
raise IOError('Currently cannot handle ellipsoidal nodes')
    if _x2 < _x1:
      _x1, _x2 = _x2, _x1
return ( log((1.0 - _x1*_x1) / (1.0 - _x2*_x2)) /
(2 * pi * self.node.r1) )
@property
def avgRadius(self):
"""
Return an estimate of average radius, pretending compartment was a cylinder
or sphere (as appropriate to subclass)
"""
return (1.0e9 * self.volume / pi)**(1.0/3.0)
@property
def centroid(self):
"""
return centroid of compartment as a tuple
"""
return self._centroid
class TwoNodeCompartment(Compartment):
def __init__(self, node0, node1):
# init Compartment object
Compartment.__init__(self)
self.nodes = [node0, node1]
(self.semiMajor0, self.semiMinor0, self.theta0, self.x0, self.y0, self.z0)\
= node0.getElipse(node1)
(self.semiMajor1, self.semiMinor1, self.theta1, self.x1, self.y1, self.z1)\
= node1.getElipse(node0)
self._centroid = None
@property
def node0(self):
return self.nodes[0]
@property
def node1(self):
return self.nodes[-1]
@property
def neighbors(self):
"""
    Return a list of this compartment's neighbors
"""
neighbors = [comp for comp in self.node0.compartments if comp != self]
neighbors.extend(comp for comp in self.node1.compartments if \
comp != self and comp not in neighbors)
return neighbors
def _calcLength(self):
"""
compute and set length
"""
return sqrt((self.x1 - self.x0)**2 +
(self.y1 - self.y0)**2 +
(self.z1 - self.z0)**2)
def _calcSurfaceArea(self):
"""
compute and set surface area
"""
ratio0 = self.semiMinor0 / self.semiMajor0
    ratio1 = self.semiMinor1 / self.semiMajor1
if isnan(ratio0):
if isnan(ratio1):
raise IOError('Degenerate (zero radius) compartment')
ratio0 = ratio1
elif isnan(ratio1):
ratio1 = ratio0
elif ratio0 != ratio1:
      raise IOError("Don't have formula for arbitrary elliptical frustum")
if self.semiMinor0 == self.semiMinor1:
# cylinder
coneFactor = 1.0
elif self.length == 0:
# disk with hole
warn('Compartment with zero length')
return pi * abs(self.semiMajor0 * self.semiMinor0
- self.semiMajor1 * self.semiMinor1)
else:
# cone
coneFactor = \
sqrt(1 + ((self.semiMinor0 - self.semiMinor1)/self.length)**2)
if ratio0 == 1.0:
# circular cross section
angleFactor = pi
else:
# eliptical cross section
      angleFactor = 2.0 * special.ellipe(sqrt(1.0 - ratio0 * ratio0) / coneFactor)
return coneFactor * angleFactor * \
self.length * (self.semiMajor0 + self.semiMajor1)
def _calcVolume(self):
"""
compute and set volume
"""
return (pi / 3.0) * self.length * \
(self.semiMajor0 * self.semiMinor0 + self.semiMajor1 * self.semiMinor1 +\
0.5 * (self.semiMajor0 * self.semiMinor1 + \
self.semiMajor1 * self.semiMinor0))
def lengthPerArea(self, _x1, _x2 = 0.5):
"""
compute and return length per cross sectional area between relative
positions _x1 and _x2 (0 <= _x <= 1, _x2 defaults to 0.5)
"""
    if _x2 < _x1:
      _x1, _x2 = _x2, _x1
if self.semiMajor0 == self.semiMajor1:
semiMajor = self.semiMajor0
if self.semiMinor0 == self.semiMinor1:
# cylinder
semiMinor = self.semiMinor0
coneFact = _x2 - _x1
else:
# semi-minor axis is changing, semi-major is constant
        semiMinor = 0.5 * (self.semiMinor0 + self.semiMinor1)
minorRatio = semiMinor / (self.semiMinor1 - self.semiMinor0)
coneFact = minorRatio * \
log( (minorRatio + _x2 - 0.5) / (minorRatio + _x1 - 0.5) )
if coneFact < 0:
coneFact = -coneFact
else:
      semiMajor = 0.5 * (self.semiMajor0 + self.semiMajor1)
majorRatio = semiMajor / (self.semiMajor1 - self.semiMajor0)
if self.semiMinor0 == self.semiMinor1:
# semi-major axis is changing, semi-minor is constant
semiMinor = self.semiMinor0
coneFact = majorRatio * \
log( (majorRatio + _x2 - 0.5) / (majorRatio + _x1 - 0.5) )
if coneFact < 0:
coneFact = -coneFact
else:
        semiMinor = 0.5 * (self.semiMinor0 + self.semiMinor1)
minorRatio = semiMinor / (self.semiMinor1 - self.semiMinor0)
ratioProd = majorRatio * minorRatio
avgRatio = 0.5 * (minorRatio + majorRatio)
scale1 = (_x1 - 0.5) / ratioProd + avgRatio
scale2 = (_x2 - 0.5) / ratioProd + avgRatio
if avgRatio > 1.0:
# answer in terms of logs
ratioRoot = sqrt(avgRatio*avgRatio - 1.0)
scale1 /= ratioRoot
scale2 /= ratioRoot
          coneFact = 0.5 * ratioProd / ratioRoot * \
            log((scale2 - 1) * (scale1 + 1) / ((scale2 + 1) * (scale1 - 1)))
elif avgRatio < 1.0:
# answer in terms of atan
ratioRoot = sqrt(1.0 - avgRatio*avgRatio)
scale1 /= ratioRoot
scale2 /= ratioRoot
coneFact = ratioProd / ratioRoot * (atan(scale2) - atan(scale1))
else:
# answer in terms of 1/x
coneFact = ratioProd / scale1 - ratioProd / scale2
return coneFact * self.length / (pi * semiMajor * semiMinor)
@property
def avgRadius(self):
"""
Return an estimate of average radius, pretending compartment was a cylinder
or sphere (as appropriate to subclass)
"""
try:
return sqrt(1.0e9 * self.volume / self.length / pi)
except ZeroDivisionError as err:
if self.length == 0:
return 0.5 * (self.semiMajor0 + self.semiMajor1)
raise err
@property
def centroid(self):
"""
Return centroid of the compartment as a tuple
"""
if self._centroid is None:
# need to calculate centroid location
weightedLength = 0.5 * pi * self.length*self.length * \
((self.semiMajor0 * self.semiMinor0 + \
self.semiMajor0 * self.semiMinor1 + \
self.semiMajor1 * self.semiMinor0) / 6.0 + \
0.5 * self.semiMajor1 * self.semiMinor1)
ratio1 = weightedLength / (1.0e9 * self.volume) / self.length
ratio0 = 1.0 - ratio1
self._centroid = (self.x0 * ratio0 + self.x1 * ratio1, \
self.y0 * ratio0 + self.y1 * ratio1,
self.z0 * ratio0 + self.z1 * ratio1)
return self._centroid
| gpl-3.0 |
bnaul/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 81 | 2525 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
edgecolor='black', s=20)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
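    # the OVR decision boundary for class c is the line
    # coef[c, 0] * x + coef[c, 1] * y + intercept[c] = 0, solved here for y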
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
larsmans/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
To test whether a classification score is significant, one technique is to
repeat the classification procedure after randomly permuting the labels.
The p-value is then given by the percentage of runs for which the score
obtained is greater than the classification score obtained in the first
place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data, uncorrelated with the labels
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
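# permutation_test_score estimates the p-value as (C + 1) / (n_permutations + 1),
# where C is the number of permutation scores at least as high as the true score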
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart, ystop, ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
    interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
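# A hedged usage sketch (assumes `tri` is a Triangulation instance built from
# scattered x, y samples with values z; the names here are illustrative only):
#   interp = LinearInterpolator(tri, z, default_value=0.0)
#   vals = interp[0:1:50j, 0:1:50j]   # (50, 50) grid of interpolated values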
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
    interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
Across the convex hull of the input points, the interpolated surface is
C1-continuous except at the nodes themselves. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
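# --- Added usage sketch (not part of the original module) -----------------
# Unlike LinearInterpolator, NNInterpolator can also be evaluated directly
# at unstructured query points via __call__. Again assuming the
# Triangulation class from this package:
#
# >>> tri = Triangulation(x, y)
# >>> interp = NNInterpolator(tri, z)
# >>> vals = interp(qx, qy)   # qx, qy: arbitrary query coordinates;
# ...                         # points outside the hull get default_value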
| agpl-3.0 |
Melclic/HMG | py_wrapper/melPywafo.py | 1 | 35078 | ##
# @file melPywafo.py
#
# Contains all the functions required for the analysis of OD growth curves. Includes visual inspection of the growth curve with first and second derivatives to identify the exponential section of the growth curve. This relies on user-defined parameters for the smoothing parameter and the minimal length required for the growth curve. Using a smoothing spline function, the volumetric changes of the growth curve, its instantaneous growth rate after the exponential phase, and the exponential growth rate are returned. Volumetric changes are calculated using the Volkmer et al. growth-rate-dependent mean cell volume function. The smoothing spline (also called a piecewise polynomial) has been extracted from the pywafo package.
#
# @version 1.0
# @author Melchior du Lac
#
#TODO: use the detected exponential section of the growth curve as an indicator of which section of the growth curve is exponential, and allow user-defined input of the start and end of the growth curve.
from __future__ import division
import numpy as np
import scipy.signal
import scipy.sparse as sparse
from numpy import ones, zeros, prod, sin, diff, pi, inf, vstack, linspace
from scipy.interpolate import interp1d
import math
import csv
import polynomial as pl
import matplotlib.pyplot as plt
############## Utility function to find the nearest function
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return idx, array[idx]
#########################################################################################
################################### pywafo objects ######################################
#########################################################################################
class PPform(object):
"""The ppform of the piecewise polynomials
is given in terms of coefficients and breaks.
The polynomial in the ith interval is
x_{i} <= x < x_{i+1}
S_i = sum(coefs[m,i]*(x-breaks[i])^(k-m), m=0..k)
where k is the degree of the polynomial.
Example
-------
>>> import matplotlib.pyplot as plt
>>> coef = np.array([[1,1]]) # unit step function
>>> coef = np.array([[1,1],[0,1]]) # linear from 0 to 2
>>> coef = np.array([[1,1],[1,1],[0,2]]) # linear from 0 to 2
>>> breaks = [0,1,2]
>>> self = PPform(coef, breaks)
>>> x = linspace(-1,3)
>>> h=plt.plot(x,self(x))
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False, a=None, b=None):
if sort:
self.breaks = np.sort(breaks)
else:
self.breaks = np.asarray(breaks)
if a is None:
a = self.breaks[0]
if b is None:
b = self.breaks[-1]
self.coeffs = np.asarray(coeffs)
self.order = self.coeffs.shape[0]
self.fill = fill
self.a = a
self.b = b
def __call__(self, xnew):
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (self.a <= xnew) & (xnew <= self.b)
res[~mask] = self.fill
xx = xnew.compress(mask)
indxs = np.searchsorted(self.breaks[:-1], xx) - 1
indxs = indxs.clip(0, len(self.breaks))
pp = self.coeffs
dx = xx - self.breaks.take(indxs)
v = pp[0, indxs]
for i in range(1, self.order):
v = dx * v + pp[i, indxs]
values = v
res[mask] = values
res.shape = saveshape
return res
def linear_extrapolate(self, output=True):
'''
Return 1D PPform which extrapolate linearly outside its basic interval
'''
max_order = 2
if self.order <= max_order:
if output:
return self
else:
return
breaks = self.breaks.copy()
coefs = self.coeffs.copy()
# pieces = len(breaks) - 1
# Add new breaks beyond each end
breaks2add = breaks[[0, -1]] + np.array([-1, 1])
newbreaks = np.hstack([breaks2add[0], breaks, breaks2add[1]])
dx = newbreaks[[0, -2]] - breaks[[0, -2]]
dx = dx.ravel()
# Get coefficients for the new last polynomial piece (a_n)
# by just relocate the previous last polynomial and
# then set all terms of order > maxOrder to zero
a_nn = coefs[:, -1]
dxN = dx[-1]
a_n = pl.polyreloc(a_nn, -dxN) # Relocate last polynomial
# set to zero all terms of order > maxOrder
a_n[0:self.order - max_order] = 0
# Get the coefficients for the new first piece (a_1)
# by first setting all terms of order > maxOrder to zero and then
# relocate the polynomial.
# Set to zero all terms of order > maxOrder, i.e., not using them
a_11 = coefs[self.order - max_order::, 0]
dx1 = dx[0]
a_1 = pl.polyreloc(a_11, -dx1) # Relocate first polynomial
a_1 = np.hstack([zeros(self.order - max_order), a_1])
newcoefs = np.hstack([a_1.reshape(-1, 1), coefs, a_n.reshape(-1, 1)])
if output:
return PPform(newcoefs, newbreaks, a=-inf, b=inf)
else:
self.coeffs = newcoefs
self.breaks = newbreaks
self.a = -inf
self.b = inf
def derivative(self):
"""
Return first derivative of the piecewise polynomial
"""
cof = pl.polyder(self.coeffs)
brks = self.breaks.copy()
return PPform(cof, brks, fill=self.fill)
def integrate(self):
"""
Return the indefinite integral of the piecewise polynomial
"""
cof = pl.polyint(self.coeffs)
pieces = len(self.breaks) - 1
if 1 < pieces:
# evaluate each integrated polynomial at the right endpoint of its
# interval
xs = diff(self.breaks[:-1, ...], axis=0)
index = np.arange(pieces - 1)
vv = xs * cof[0, index]
k = self.order
for i in range(1, k):
vv = xs * (vv + cof[i, index])
cof[-1] = np.hstack((0, vv)).cumsum()
return PPform(cof, self.breaks, fill=self.fill)
class SmoothSpline(PPform):
"""
Cubic Smoothing Spline.
Parameters
----------
x : array-like
x-coordinates of data. (vector)
y : array-like
y-coordinates of data. (vector or matrix)
p : real scalar
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
lin_extrap : bool
if False regular smoothing spline
if True a smoothing spline with a constraint on the ends to
ensure linear extrapolation outside the range of the data (default)
var : array-like
variance of each y(i) (default 1)
Returns
-------
pp : ppform
If xx is not given, return self-form of the spline.
Given the approximate values
y(i) = g(x(i))+e(i)
of some smooth function, g, where e(i) is the error. SMOOTH tries to
recover g from y by constructing a function, f, which minimizes
p * sum (Y(i) - f(X(i)))^2/d2(i) + (1-p) * int (f'')^2
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0,1)
>>> y = np.exp(x)+1e-1*np.random.randn(x.size)
>>> pp9 = SmoothSpline(x, y, p=.9)
>>> pp99 = SmoothSpline(x, y, p=.99, var=0.01)
>>> h=plt.plot(x,y, x,pp99(x),'g', x,pp9(x),'k', x,np.exp(x),'r')
See also
--------
lc2tr, dat2tr
References
----------
Carl de Boor (1978)
'Practical Guide to Splines'
Springer Verlag
Uses EqXIV.6--9, self 239
"""
def __init__(self, xx, yy, p=None, lin_extrap=True, var=1):
coefs, brks = self._compute_coefs(xx, yy, p, var)
super(SmoothSpline, self).__init__(coefs, brks)
if lin_extrap:
self.linear_extrapolate(output=False)
def _compute_coefs(self, xx, yy, p=None, var=1):
x, y = np.atleast_1d(xx, yy)
x = x.ravel()
dx = np.diff(x)
must_sort = (dx < 0).any()
if must_sort:
ind = x.argsort()
x = x[ind]
y = y[..., ind]
dx = np.diff(x)
n = len(x)
# ndy = y.ndim
szy = y.shape
nd = prod(szy[:-1])
ny = szy[-1]
if n < 2:
raise ValueError('There must be >=2 data points.')
elif (dx <= 0).any():
raise ValueError('Two consecutive values in x can not be equal.')
elif n != ny:
raise ValueError('x and y must have the same length.')
dydx = np.diff(y) / dx
if (n == 2): # % straight line
coefs = np.vstack([dydx.ravel(), y[0, :]])
else:
dx1 = 1. / dx
D = sparse.spdiags(var * ones(n), 0, n, n) # The variance
u, p = self._compute_u(p, D, dydx, dx, dx1, n)
dx1.shape = (n - 1, -1)
dx.shape = (n - 1, -1)
zrs = zeros(int(nd))
if p < 1:
# faster than yi-6*(1-p)*Q*u
Qu = D * diff(vstack([zrs, diff(vstack([zrs, u, zrs]),
axis=0) * dx1, zrs]), axis=0)
ai = (y - (6 * (1 - p) * Qu).T).T
else:
ai = y.reshape(n, -1)
# The piecewise polynominals are written as
# fi=ai+bi*(x-xi)+ci*(x-xi)^2+di*(x-xi)^3
# where the derivatives in the knots according to Carl de Boor are:
# ddfi = 6*p*[0;u] = 2*ci;
# dddfi = 2*diff([ci;0])./dx = 6*di;
# dfi = diff(ai)./dx-(ci+di.*dx).*dx = bi;
ci = np.vstack([zrs, 3 * p * u])
di = (diff(vstack([ci, zrs]), axis=0) * dx1 / 3)
bi = (diff(ai, axis=0) * dx1 - (ci + di * dx) * dx)
ai = ai[:n - 1, ...]
if nd > 1:
di = di.T
ci = ci.T
ai = ai.T
if not any(di):
if not any(ci):
coefs = vstack([bi.ravel(), ai.ravel()])
else:
coefs = vstack([ci.ravel(), bi.ravel(), ai.ravel()])
else:
coefs = vstack(
[di.ravel(), ci.ravel(), bi.ravel(), ai.ravel()])
return coefs, x
def _compute_u(self, p, D, dydx, dx, dx1, n):
if p is None or p != 0:
data = [dx[1:n - 1], 2 * (dx[:n - 2] + dx[1:n - 1]), dx[:n - 2]]
R = sparse.spdiags(data, [-1, 0, 1], n - 2, n - 2)
if p is None or p < 1:
Q = sparse.spdiags(
[dx1[:n - 2], -(dx1[:n - 2] + dx1[1:n - 1]), dx1[1:n - 1]],
[0, -1, -2], n, n - 2)
QDQ = (Q.T * D * Q)
if p is None or p < 0:
# Estimate p
p = 1. / \
(1. + QDQ.diagonal().sum() /
(100. * R.diagonal().sum() ** 2))
if p == 0:
QQ = 6 * QDQ
else:
QQ = (6 * (1 - p)) * (QDQ) + p * R
else:
QQ = R
# Make sure it uses symmetric matrix solver
ddydx = diff(dydx, axis=0)
# sp.linalg.use_solver(useUmfpack=True)
u = 2 * sparse.linalg.spsolve((QQ + QQ.T), ddydx) # @UndefinedVariable
return u.reshape(n - 2, -1), p
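# --- Added usage sketch (not part of the original pywafo code) ------------
# How this module applies the spline in practice: smooth the OD curve, then
# use the spline derivative to estimate the instantaneous growth rate.
#
# >>> t = np.linspace(0.0, 100.0, 200)          # minutes
# >>> od = 0.05 * np.exp(0.02 * t)              # synthetic exponential growth
# >>> ss = SmoothSpline(t, od, p=0.99)          # close to an interpolating spline
# >>> mu = ss.derivative()(t) / ss(t)           # d/dt ln(OD), ~0.02 per minute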
#########################################################################################
#########################################################################################
##
# @brief Parser of time-series histograms of bacterial DNA content, stained with DAPI.
#
# Given the histogram output of flow cytometry analysis of a bacterial population, this function extracts the data from CSV format into Python arrays to be used for further analysis by saving it into a pickle. A first-order polynomial that maps channels to DNA peaks must be provided.
#
# @param inputFile Path to the CSV file with histograms
# @param polyChannel Array with the parameters of the first order polynomial
#
# @return times Array of the time series snapshots of histograms
# @return tmpData 2D array of the y-axis normalised histogram values
# @return scale 1D array of the x-axis DNA scale of the histograms
#
def parseFlowData(inputFile, polyChannel):
data = []
sortedData = []
sortedScale = []
channels = []
times = []
with open(inputFile, 'r') as f:
reader = csv.reader(f)
isFirst = True
for row in reader:
if isFirst:
for y in range(1, len(row)):
data.append([])
sortedData.append([])
sortedScale.append([])
times.append(float(row[y])*60.0)
isFirst = False
else:
channels.append(float(row[0]))
for y in range(1,len(row)):
data[y-1].append(float(row[y]))
#hist_channels = []
#for i in range(len(channels)):
# if i==0:
# hist_channels.append(channels[0]-(((channels[0]+channels[1])/2)-channels[0]))
# hist_channels.append((channels[i]+channels[i+1])/2)
# elif i==len(channels)-1:
# hist_channels.append(channels[-1]+(((channels[-1]+channels[-2])/2)-channels[-1]))
# else:
# hist_channels.append((channels[i]+channels[i+1])/2)
#scale = [polyChannel[0]*math.pow(x,3.0) + polyChannel[1]*math.pow(x,2.0) + polyChannel[2]*x + polyChannel[3] for x in hist_channels]
#scale = [polyChannel[0]*math.pow(x,2.0)+polyChannel[1]*x+polyChannel[2] for x in hist_channels]
#scale = [polyChannel[0]*x+polyChannel[1] for x in hist_channels]
#scale = [polyChannel[0]*math.pow(x,2.0)+polyChannel[1]*x+polyChannel[2] for x in range(len(channels))]
#scale = [0.04183*i+0.03283 for i in range(len(channels))]
#scale = [0.0007466*math.pow(x,2.0)+0.008022*x+0.7751 for x in range(len(channels))]
#scale = [0.04401*x-0.03389 for x in range(len(channels))]
scale = [polyChannel[0]*x+polyChannel[1] for x in range(len(channels))]
scale.append(14169.0)
#for i in range(len(scale)):
# if i<1792:
# scale[i] = 0.002556*i-3.578
#scale.append(80.0)
#scale = [polyChannel[0]*math.exp(polyChannel[1]*x) for x in range(len(channels))]
#scale.append(160.0) #<-- it does not matter what we add since it is much larger
#scale = [polyChannel[0]*x+polyChannel[1] for x in range(len(channels))]
#scale.append(12.0) #<-- it does not matter what we add since it is much larger
tmpData = []
for i in data:
tmpData.append([y/sum(i) for y in i])
#for dataIndex in range(len(data)):
# a = zip(data[dataIndex], scale)
# a = list(a)
# b = [i for i in a if i[1]>=0.01 and i[1]<=220.0]
# b.append(160.0)
# sortedScale[dataIndex] = [i[1] for i in b]
# sortedScale[dataIndex].append(scale[scale.index(sortedScale[0][-1])+1])
# sortedData[dataIndex] = [i[0] for i in b]
#sortedData[dataIndex] = [i/sum(sortedData[dataIndex]) for i in sortedData[dataIndex]]
# sortedData[dataIndex] = [i/sum(sortedData[dataIndex]) if sum(sortedData[dataIndex])>0.0 else 0 for i in sortedData[dataIndex]]
#return times, sortedData, sortedScale
return times, tmpData, scale
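# --- Added sketch of the expected input (not part of the original code) ----
# The CSV layout inferred from the parser above: the first row holds the
# snapshot times (presumably in hours, converted to minutes via *60.0), the
# first column holds channel numbers, and every remaining column is one
# histogram. A hypothetical file could look like:
#
#   channel, 0.5, 1.0, 1.5   <- snapshot times
#   1,       12,  30,  44    <- counts in channel 1 for each snapshot
#   2,       15,  28,  40
#
# parseFlowData('flow.csv', [0.04, 0.03]) would then return the times in
# minutes, the column-normalised histograms, and the DNA scale
# 0.04*i + 0.03 for each channel index i (plus the appended sentinel value).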
##
# @brief Fill the gaps in the OD input data through linear interpolation
#
# Because the smoothing spline algorithm does not deal well with gaps in the data, we fill any gaps assuming that the change between two points is linear. Two points are determined to have a gap if the distance between them is larger than twice the mean of the sampling intervals. The gap is then filled by linear interpolation at the mean sampling frequency.
#
# @param time 1D array of x-axis of the OD growth curve
# @param od 1D array of the y-axis of the OD growth curve
#
# @return time Filled and parsed (minutes) x-axis of the growth curve
# @return od Filled and parsed y-axis of the growth curve
# @return percentMissing Calculated percentage of the data that has been filled
#
#TODO: fix WARNING: Buggy, if there is nothing to fill, then this method attempts to divide by 0
def gapFill(time, od):
oldODLen = len(od)
oldTimeLen = len(time)
#calculate the intervals between each time points
gapArray = []
for i in range(len(time)-1):
gapArray.append(abs(time[i]-time[i+1]))
meanGap = sum(gapArray)/float(len(gapArray))
#gap values for time and OD that are bigger than twice the mean where we will fill them
gapsTime = []
gapsOD = []
#gap values that value is less than the mean and we will not fill
nonGaps = []
count = 0
for i in gapArray:
if i>(2.0*meanGap):
gapsTime.append([time[count],time[count+1]])
gapsOD.append([od[count],od[count+1]])
elif i<meanGap:
nonGaps.append(i)
count += 1
nonGapMean = 0.0
if len(nonGaps)==0:
return time, od, 0.0
nonGapMean = sum(nonGaps)/float(len(nonGaps))
#Fill the gaps using linear interpolation
for i in range(len(gapsTime)):
linearFit = interp1d(gapsTime[i], gapsOD[i])
filledTime = np.arange(gapsTime[i][0]+nonGapMean, gapsTime[i][1], nonGapMean) #endpoint excluded; the existing samples already bound the gap
filledOD = linearFit(filledTime)
for i in range(len(time)-1):
if time[i]<filledTime[0] and time[i+1]>filledTime[-1]:
posAdd = i
for y in range(len(filledTime)):
posAdd += 1
time.insert(posAdd,filledTime[y])
od.insert(posAdd,filledOD[y])
break
percentMissing = 0.0
if(oldTimeLen<len(time)):
percentMissing = math.ceil(100.0*(len(od)-oldODLen)/len(od))
return time, od, percentMissing
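# --- Added usage sketch (not part of the original code) --------------------
# A toy run of gapFill: samples every 10 min with one gap between t=20 and
# t=60 (wider than twice the mean interval), filled by linear interpolation
# at the mean sampling interval:
#
# >>> t, od, missing = gapFill([0, 10, 20, 60, 70], [0.1, 0.2, 0.3, 0.7, 0.8])
# >>> t        # [0, 10, 20, 30, 40, 50, 60, 70]
# >>> od       # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# >>> missing  # 38 -- three of the eight points were interpolated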
##
# @brief Fill the end of the growth curve that is descending with flat OD changes
#
# Because our modelling strategy cannot handle diminishing growth curves, we replace the death phase with flat, unchanging OD values. Future plans involve the implementation of cell death so as to be able to model decreasing OD values.
#
# @param od 1D array with the OD values
#
# @return od 1D array with parsed OD values
#
def linearEnds(od):
startPin = np.median(od[0:5])
endPin = np.median(od[len(od)-5:-1])
if od[0]!=min(od):
for i in range(5):
od.insert(i, startPin)
if od[-1]!=max(od):
for i in range(len(od), len(od)-5, -1):
od.insert(i, endPin)
return od
#################################################################
############################# Plot ##############################
#################################################################
##
# @brief Visual inspection of the growth curve with currently calculated exponential phase, with first and second order polynomials
#
# To check that the detected exponential section of the growth curve is indeed valid, we plot the OD growth curve (log scale) and its first- and second-order differentials. Theoretically, the exponential section of the growth curve is where ln(OD) is linear, i.e. where its second-order differential is close to zero.
#
# @param fitTime Spline interpolation fit time output with dt as time step
# @param fitOD Spline interpolation fit OD output with dt as time step
# @param time Original 1D array of the time of the growth curve
# @param od Original 1D array of the OD of the growth curve
# @param diffOD First order differential of fitOD (not used)
# @param isNegOD Boolean determining if there are any parts of the fit growth curve that is negative and thus invalid
# @param isNegDeriv Boolean determining if there are any parts of the first order differential of the growth curve that are negative and thus invalid
# @param expoTau Calculated doubling time of the exponential section of the growth curve
# @param startLinear Time in minutes of the start of the linear phase
# @param stopLinear Time in minutes of the stop of the linear phase
#
def plotIt(fitTime, fitOD, time, od, diffOD, isNegOD, isNegDeriv, expoTau, startLinear, stopLinear):
plt.subplot(4,1,1)
plt.title('Valid OD: '+str(not isNegOD)+' | Valid diff(OD): '+str(not isNegDeriv))
plt.plot(time, od)
plt.ylabel('OD')
plt.subplot(4,1,2)
#plt.semilogy(time, od, 'o', label='Original Data')
#plt.semilogy(fitTime, fitOD, label='Spline Fit')
plt.plot(time, np.log(od), 'o', label='Original Data')
plt.plot(fitTime, np.log(fitOD), label='Spline Fit')
#plt.axvline(maxOD, color='red', label='Max OD ('+str(round(expoTau,2))+'min-1)')
if not startLinear==-1.0 and not stopLinear==-1.0:
plt.axvline(startLinear, color='red', label='Start Linear (tau: '+str(round(expoTau,2))+')')
plt.axvline(stopLinear, color='yellow', label='Stop Linear')
plt.xlabel('Time (min)')
plt.ylabel('ln(OD)')
#plt.title('Valid OD: '+str(not isNegOD)+' | Valid diff(OD): '+str(not isNegDeriv))
plt.legend(loc=4)
plt.subplot(4,1,3)
#plt.plot(fitTime, diffOD)
#startLoc = -1.0
#for i in range(len(fitTime)):
# if fitTime[i]>=startLinear:
# startLoc = i
# break
#ft = fitTime[:]
#ft = ft[0:-1]
#if not startLinear==-1.0 and not stopLinear==-1.0:
# startLoc = find_nearest(fitTime, startLinear)[0]
# endLoc = find_nearest(fitTime, stopLinear)[0]
# print(len(fitTime[startLoc:endLoc]))
# print(len(np.diff(np.log(fitOD))[startLoc:endLoc]))
# plt.plot(fitTime[startLoc:endLoc], np.diff(np.log(fitOD))[startLoc:endLoc])
#else:
# plt.plot(fitTime[0:-1], np.diff(np.log(fitOD)))
plt.plot(fitTime[0:-1], np.diff(np.log(fitOD)))
#plt.plot(ft[startLoc:len(ft)], np.gradient(np.log(fitOD)[startLoc:-1]))
plt.xlabel('Time (min)')
plt.ylabel('F1(ln(OD))')
plt.subplot(4,1,4)
#if not startLinear==-1.0 and not stopLinear==-1.0:
# startLoc = find_nearest(fitTime, startLinear)[0]
# endLoc = find_nearest(fitTime, stopLinear)[0]
# print(len(fitTime[startLoc:endLoc]))
# print(len(np.diff(np.log(fitOD), 2)[startLoc:endLoc]))
# #plt.plot(fitTime[startLoc:-1], np.diff(np.log(fitOD)))[startLoc:-1]
# plt.plot(fitTime[startLoc:endLoc], np.diff(np.log(fitOD), 2)[startLoc:endLoc])
#else:
# plt.plot(fitTime[0:-2], np.diff(np.log(fitOD), 2))
plt.plot(fitTime[0:-2], np.diff(np.log(fitOD), 2))
plt.xlabel('Time (min)')
plt.ylabel('F2(ln(OD))')
plt.show()
"""
plt.semilogy(time, od, 'o', label='Original Data')
plt.semilogy(fitTime, fitOD, label='Spline Fit')
#plt.plot(time, od, 'o', label='Original Data')
#plt.plot(fitTime, fitOD, label='Spline Fit')
plt.axvline(maxOD, color='red', label='Max OD')
plt.xlabel('Time (min)')
plt.ylabel('OD')
plt.title('Valid OD: '+str(not isNegOD)+' | Valid diff(OD): '+str(not isNegDeriv))
plt.legend(loc=4)
plt.show()
"""
################################################################
################################################################
################################################################
##
# @brief Try a new smoothing parameter
#
# Because the smoothing parameter and the exponential window are user defined, we must recalculate the fitted OD for each new set of parameters
#
# @param fitTime 1D array of the fitted time using dt as time step
# @param time Original time array
# @param od Original od array
# @param p Smoothing parameter
# @param dt Time step
# @param confidenceBound confidence of the linear fit to the OD growth curve
# @param minFlatSize Minimal size of the array that is the exponential section of the growth curve
#
# @return fitOD Spline fit OD values
# @return fitDerivOD Spline fit first order derivative OD values
# @return maxOD Maximal OD values of the spline fit
# @return isNegOD Boolean determining if the spline fit contains negative OD values
# @return isNegDeriv Boolean determining whether the first order derivative of the spline fit contains decreasing values
# @return expoTau Growth rate of the exponential section of the growth curve
# @return ss SmoothingSpline object
# @return startLinear Start of the linear section of the growth curve
# @return stopLinear End of the linear section of the growth curve
#
def tryNewP(fitTime, time, od, p, dt, confidenceBound, minFlatSize, ignoreTime):
ss = SmoothSpline(time, od, p=p)
fitOD = ss(fitTime)
ssDeriv = ss.derivative()
#fitDerivOD = ssDeriv(fitTime).tolist()
fitDerivOD = np.gradient(np.log(fitOD))
#lnDeriv = np.diff(np.log(fitOD)).tolist()
lnDeriv = np.gradient(np.log(fitOD)).tolist()
#cannot use max() function because it may be at time 0 --> data we cannot trust
#need to identify and ignore the lag phase
#remove anything with an OD of X
maxOD = fitTime[lnDeriv.index(max(lnDeriv))]
#the linear growth rate
numSide = int(10.0/dt)
fitTime = fitTime.tolist()
expoTime = fitTime[fitTime.index(maxOD)-numSide:fitTime.index(maxOD)+numSide]
expoOD = fitOD[fitTime.index(maxOD)-numSide:fitTime.index(maxOD)+numSide]
#test: identify the linear part of growth via the second derivative of the natural log of the OD curve. If it is 0 then it is linear. However, because the data are noisy, a confidence interval must be used
secondDeriv = np.diff(np.log(fitOD), 2)
startLinear = -1.0
stopLinear = -1.0
linearTime = []
linearOD = []
count = 0
for i in range(len(secondDeriv)):
if confidenceBound>=secondDeriv[i]>=-confidenceBound and fitTime[i]>ignoreTime:
if startLinear==-1.0:
startLinear = fitTime[i]
else:
if count>=minFlatSize:
stopLinear = fitTime[i]
count += 1
linearTime.append(fitTime[i])
linearOD.append(fitOD[i])
else:
if not startLinear==-1.0 and not stopLinear==-1.0:
count = 0
break
if not startLinear==-1.0 and stopLinear==-1.0:
linearTime = []
linearOD = []
startLinear = -1.0
count = 0
if startLinear==-1.0 and stopLinear==-1.0:
linearTime = []
linearOD = []
startLoc = -1.0
for i in range(len(fitTime)):
if fitTime[i]>=stopLinear:
startLoc = i
break
#[ unicode(x.strip()) if x is not None else '' for x in row ]
fitDerivOD = [i if not 0.0>i>-0.0000005 else 0.0 for i in fitDerivOD]
fitDerivOD = [i*100.0 for i in fitDerivOD]
isNegOD = any(i<0.0 for i in fitOD[startLoc:len(fitOD)])
isNegDeriv = any(y<0.0 for y in fitDerivOD[startLoc:len(fitDerivOD)])
if not fitDerivOD[startLoc]==0.0:
#expoTau = math.log(2)/fitDerivOD[startLoc]
expoTau = math.log(2)/np.polyfit(linearTime, np.log(linearOD), 1)[0]
else:
expoTau = 0.0
return fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, ss, startLinear, stopLinear
#############################################################
###################### USER SMOOTHING #######################
#############################################################
##
# @brief Matplotlib plotting of the OD spline fit with user input of smoothing
#
# Given the smoothing parameter, the minimal length of the linear section of growth, and the confidence bounds, this function plots the results and requires user input for all three parameters. It also checks that there are no decreasing OD values and that the fit is valid overall
#
# @param time Original measured time values
# @param od Original measured OD values
# @param dt Time step
# @param fname Input file name with the histogram time series
#
# @return fitTime Spline fit time
# @return fitOD Spline fit OD values
# @return fitDerivOD Spline fit first order derivative OD values
# @return p Smoothing parameter of the spline fit
# @return maxOD Maximal OD values of the spline fit
# @return expoTau Growth rate of the exponential section of the growth curve
# @return fitObj SmoothingSpline object
# @return startLinear Start of the linear section of the growth curve
# @return stopLinear End of the linear section of the growth curve
# @return minFlatSize Minimal size of the array that is the exponential section of the growth curve
# @return confidenceBound confidence of the linear fit to the OD growth curve
#
#TODO: Make this in something else (javascript), not matplotlib
def userSmoothing(time, od, dt, fname):
od = [0.0001 if x<=0.0 else x for x in od]
spacing = np.mean([math.fabs(time[i+1]-time[i]) for i in range(len(time)-1)]) #use every consecutive interval
p = 1.0/(1.0+math.pow(np.mean(spacing),3.0)/6.0)
fitTime = np.arange(time[0], time[-1], dt)
confidenceBound = 0.000000001
minFlatSize = 3000
ignoreTime = 10.0
#identify maxOD, and if not OD[-1] fill it with maxOD until next Flow Measurement
if max(od)!=od[-1]:
maxODIndex = od.index(max(od))
od = od[0:maxODIndex]
time = time[0:maxODIndex]
time, od, percentMissing = gapFill(time[:], od[:])
fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, fitObj, startLinear, stopLinear = tryNewP(fitTime, time, od, p/(1+1), dt, confidenceBound, minFlatSize, ignoreTime)
#isValid = False
#for i in range(5):
# fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, fitObj, startLinear, stopLinear = tryNewP(fitTime, time, od, p/(i+1), dt, confidenceBound)
# if not isNegOD and not isNegDeriv:
# isValid = True
# break
#if not isValid:
# time, od, percentMissing = gapFill(time[:], od[:])
fitTime = np.arange(time[0], time[-1], dt)
plt.ion()
fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, fitObj, startLinear, stopLinear = tryNewP(fitTime, time, od, p, dt, confidenceBound, minFlatSize, ignoreTime)
print(startLinear)
print(stopLinear)
plotIt(fitTime, fitOD, time, od, fitDerivOD, isNegOD, isNegDeriv, expoTau, startLinear, stopLinear)
while True:
print('p: '+str(p))
inP = input('Input different p value?: ')
print('confidenceBound: '+str(confidenceBound))
inConfB = input('Input different confidenceBound value?: ')
print('minFlatSize: '+str(minFlatSize))
mfs = input('Input different minFlatSize?: ')
print('ignoreTime: '+str(ignoreTime))
it = input('Input different ignoreTime?: ')
try:
p = float(inP)
confidenceBound = float(inConfB)
minFlatSize = float(mfs)
ignoreTime = float(it)
plt.clf()
fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, fitObj, startLinear, stopLinear= tryNewP(fitTime, time, od, p, dt, confidenceBound, minFlatSize, ignoreTime)
plotIt(fitTime, fitOD, time, od, fitDerivOD, isNegOD, isNegDeriv, expoTau, startLinear, stopLinear)
print(startLinear)
print(stopLinear)
except ValueError:
if not isNegOD and not isNegDeriv:
plt.savefig(fname+'.svg')
plt.close()
break
else:
print('The smoothing parameters are invalid... Try again')
print('isNegOD: '+str(isNegOD))
print('isNegDeriv: '+str(isNegDeriv))
return fitTime, fitOD, fitDerivOD, p, maxOD, expoTau, fitObj, startLinear, stopLinear, minFlatSize, confidenceBound
#############################################################
######################### SMOOTHING #########################
#############################################################
##
# @brief Non-user-defined smoothing, and further calculations from the growth curve
#
# Given the deduced smoothing parameter, minimal size of the linear section of growth, and confidence bound calculated by the userSmoothing function, perform the fit and return all derived values
#
# @param time Original measured time values
# @param od Original measured OD values
# @param dt Time step
# @param fname Input file name with the histogram time series
# @param p Smoothing parameter of the spline fit
# @param confidenceBound confidence of the linear fit to the OD growth curve
# @param minFlatSize Minimal size of the array that is the exponential section of the growth curve
#
# @return fitTime Spline fit time
# @return fitOD Spline fit OD values
# @return fitDerivOD Spline fit first order derivative OD values
# @return maxOD Maximal OD values of the spline fit
# @return expoTau Growth rate of the exponential section of the growth curve
# @return fitObj SmoothingSpline object
# @return startLinear Start of the linear section of the growth curve
# @return stopLinear End of the linear section of the growth curve
#
def smoothing(time, od, dt, fname, p, confidenceBound, minFlatSize):
spacing = np.mean([math.fabs(time[i+1]-time[i]) for i in range(len(time)-2)])
fitTime = np.arange(time[0], time[-1], dt)
time, od, percentMissing = gapFill(time[:], od[:])
#identify maxOD, and if not OD[-1] fill it with maxOD until next Flow Measurement
if max(od)!=od[-1]:
maxODIndex = od.index(max(od))
od = od[0:maxODIndex]
time = time[0:maxODIndex]
ignoreTime = 10.0 #assumed default, mirroring userSmoothing; tryNewP requires this argument
fitOD, fitDerivOD, maxOD, isNegOD, isNegDeriv, expoTau, fitObj, startLinear, stopLinear = tryNewP(fitTime, time, od, p, dt, confidenceBound, minFlatSize, ignoreTime)
return fitTime, fitOD, fitDerivOD, p, max(od), expoTau, fitObj, startLinear, stopLinear
##############################################################
##############################################################
##############################################################
##
# @brief Main function that parses the flow cytometry data and calculates the derived growth quantities
#
# @param inputFile Path to the input file with the histograms
# @param inputTime Growth rate input time
# @param inputOD Growth rate input OD
# @param polyChannel parameters of the first order polynomial that converts the channel DNA content
# @param dt Time step
# @param p Smoothing parameter
# @param confidenceBound Confidence bounds of the linear fit of the growth curve
# @param minFlatSize Minimal size of the linear section of the growth curve
#
# @return flowTimes Output flow cytometry times in minutes
# @return flowData Output flow cytometry histograms
# @return flowScale Output flow cytometry DNA x-axis scale
# @return injectionOD Spline fit OD data based on dt time step
# @return injectionGR Spline fit instantaneous growth rate based on dt
# @return injectionGrownMass Calculated spline fit total volumetric changes of the growth curve based on dt
# @return injectionTime Spline fit time based on dt
# @return expoTau Linear section of the growth curve growth rate
#
def genData(inputFile, inputTime, inputOD, polyChannel, dt, p=-1.0, confidenceBound=-1.0, minFlatSize=-1.0):
flowTimes, flowData, flowScale = parseFlowData(inputFile, polyChannel)
if(p==-1.0):
fitTime, fitOD, fitDerivOD, p, maxOD, expoTau, fitObj, startLinear, endLinear, minFlatSize, confidenceBound = userSmoothing(inputTime, inputOD, dt, inputFile.split('/')[-1].replace('.csv', ''))
else:
fitTime, fitOD, fitDerivOD, p, maxOD, expoTau, fitObj, startLinear, endLinear = smoothing(inputTime, inputOD, dt, inputFile.split('/')[-1].replace('.csv', ''), p, confidenceBound, minFlatSize)
#the flow data should be limited to one histogram for the exponential phase and then the rest of the stationary phase
flowLinearData = []
flowLinearScale = []
flowLinearTimes = []
for i in range(len(flowTimes)):
if flowTimes[i]>=startLinear and flowTimes[i]<=endLinear:
flowLinearData.append(flowData[i])
flowLinearScale.append(flowScale[i])
flowLinearTimes.append(flowTimes[i])
tmpFlowData = []
tmpFlowScale = []
tmpFlowTimes = []
tmpFlowData.append(flowLinearData[-1])
tmpFlowScale.append(flowLinearScale[-1])
tmpFlowTimes.append(flowLinearTimes[-1])
for i in range(len(flowTimes)):
if flowTimes[i]>endLinear:
tmpFlowData.append(flowData[i])
tmpFlowScale.append(flowScale[i])
tmpFlowTimes.append(flowTimes[i])
flowData = tmpFlowData
#flowScale = tmpFlowScale
flowTimes = tmpFlowTimes
injectionTime = [np.arange(flowTimes[i], flowTimes[i+1]+dt, dt) for i in range(len(flowTimes)-1)]
injectionOD = [list(fitObj(i)) for i in injectionTime]
injectionGR = [list(np.gradient(np.log(i))*100.0) for i in injectionOD]
injectionGrownMass = [[(3.6*y)*math.pow(10.0, 9.0) for y in i] for i in injectionOD]
return flowTimes, flowData, flowScale, injectionOD, injectionGR, injectionGrownMass, injectionTime, expoTau
| gpl-3.0 |
thilbern/scikit-learn | benchmarks/bench_multilabel_metrics.py | 11 | 7258 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': f1_score,
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
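# --- Added sketch (not part of the original benchmark) ---------------------
# What the FORMATS converters do to a small dense indicator matrix:
#
# >>> y = np.array([[1, 0, 1], [0, 1, 0]])
# >>> FORMATS['sequences'](y)   # label indices per sample: [[0, 2], [1]]
# >>> FORMATS['csr'](y)         # the same data as a scipy CSR sparse matrix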
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/indexes/multi/test_missing.py | 2 | 4113 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas as pd
from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
def test_fillna(idx):
# GH 11343
# TODO: Remove or Refactor. Not Implemented for MultiIndex
for name, index in [('idx', idx), ]:
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy()
values = idx.values
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_dropna():
# GH 6194
idx = pd.MultiIndex.from_arrays([[1, np.nan, 3, np.nan, 5],
[1, 2, np.nan, np.nan, 5],
['a', 'b', 'c', np.nan, 'e']])
exp = pd.MultiIndex.from_arrays([[1, 5],
[1, 5],
['a', 'e']])
tm.assert_index_equal(idx.dropna(), exp)
tm.assert_index_equal(idx.dropna(how='any'), exp)
exp = pd.MultiIndex.from_arrays([[1, np.nan, 3, 5],
[1, 2, np.nan, 5],
['a', 'b', 'c', 'e']])
tm.assert_index_equal(idx.dropna(how='all'), exp)
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
idx.dropna(how='xxx')
def test_nulls(idx):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
@pytest.mark.xfail
def test_hasnans_isnans(idx):
# GH 11343, added tests for hasnans / isnans
index = idx.copy()
# cases where the index doesn't include NaN
expected = np.array([False] * len(index), dtype=bool)
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is False
index = idx.copy()
values = index.values
values[1] = np.nan
index = idx.__class__(values)
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(index._isnan, expected)
assert index.hasnans is True
def test_nan_stays_float():
# GH 7031
idx0 = pd.MultiIndex(levels=[["A", "B"], []],
codes=[[1, 0], [-1, -1]],
names=[0, 1])
idx1 = pd.MultiIndex(levels=[["C"], ["D"]],
codes=[[0], [0]],
names=[0, 1])
idxm = idx0.join(idx1, how='outer')
assert pd.isna(idx0.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(idxm.get_level_values(1)[:-1]).all()
df0 = pd.DataFrame([[1, 2]], index=idx0)
df1 = pd.DataFrame([[3, 4]], index=idx1)
dfm = df0 - df1
assert pd.isna(df0.index.get_level_values(1)).all()
# the following failed in 0.14.1
assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()
| bsd-3-clause |
kgadek/evogil | algorithms/NSGAIII/NSGAIII.py | 1 | 18189 | import collections
import math
import random
import numpy
import numpy.linalg
from algorithms.base.drivergen import DriverGen, ImgaProxy
EPSILON = numpy.finfo(float).eps
import matplotlib.pyplot as plt
class NSGAIII(DriverGen):
class NSGAIIIImgaProxy(ImgaProxy):
def __init__(self, driver, cost, fronts, individuals):
super().__init__(driver, cost)
self.individuals = individuals
self.fronts = fronts
def finalized_population(self):
return [x.v for x in self.individuals]
def current_population(self):
return [x.v for x in self.individuals]
def deport_emigrants(self, immigrants):
immigrants_cp = list(immigrants)
to_remove = []
for p in self.individuals:
if p.v in immigrants_cp:
to_remove.append(p)
immigrants_cp.remove(p.v)
for p in to_remove:
self.individuals.remove(p)
return to_remove
def assimilate_immigrants(self, emigrants):
self.individuals.extend(emigrants)
def nominate_delegates(self):
return [x.v for x in self.fronts[1]]
def __init__(self,
population,
dims,
fitnesses,
mutation_eta,
crossover_eta,
mutation_rate='default',
crossover_rate=0.9,
theta=5,
trim_function=lambda x: x,
fitness_archive=None):
super().__init__()
self.fitness_archive = fitness_archive
self.theta = theta
self.dims = dims
self.dims_no = len(dims)
self.objectives = fitnesses
self.objective_no = len(self.objectives)
self.eta_crossover = crossover_eta
self.eta_mutation = mutation_eta
self.crossover_rate = crossover_rate
self.mutation_rate = 1.0 / len(self.dims) if mutation_rate == 'default' else mutation_rate
self.population_size = len(population)
self.reference_points = self.generate_reference_points()
self.reference_point_lengths = [numpy.linalg.norm(point) for point in self.reference_points]
self.individuals = []
self.trim_function = trim_function
self.population = [self.trim_function(x) for x in population]
self.cost = 0
self.primary_cost_included = False
self.budget = None
self.ideal_point = [float('inf') for _ in range(self.objective_no)]
self.update_ideal_point(self.individuals)
# TODO: remove debug
# plt.scatter([x.v[0] for x in self.individuals], [x.v[1] for x in self.individuals], c='g')
# plt.scatter([x.objectives[0] for x in self.individuals], [x.objectives[1] for x in self.individuals], c='b')
# plt.scatter([self.ideal_point[0]], [self.ideal_point[1]], c='r')
# plt.show()
self.clusters = [[] for _ in self.reference_points]
def generate_reference_points(self):
return [generate_reference_point(self.objective_no) for _ in range(self.population_size)]
@property
def population(self):
return [x.v for x in self.individuals]
@population.setter
def population(self, pop):
# should work anyway; stabilize after each offspring generation
# if len(pop) % 2 != 0:
# raise ValueError("Population must be even")
# if len(pop) < len(self.reference_points):
# raise ValueError("Population too small for the requested amount of reference points")
self.individuals = [Individual(x) for x in pop]
self.population_size = len(self.individuals)
def _calculate_objectives(self, individuals):
for ind in individuals:
if ind.objectives is None:
if (self.fitness_archive is not None) and (ind.v in self.fitness_archive):
ind.objectives = self.fitness_archive[ind.v]
else:
self.cost += 1
ind.objectives = [objective(ind.v) for objective in self.objectives]
if self.fitness_archive is not None:
self.fitness_archive[ind.v] = ind.objectives
def update_ideal_point(self, individuals):
self._calculate_objectives(individuals)
for ind in individuals:
for i, objective in enumerate(ind.objectives):
if self.ideal_point[i] > objective:
self.ideal_point[i] = objective
def population_generator(self):
while True:
fronts = self.next_step()
yield NSGAIII.NSGAIIIImgaProxy(self, self.cost, fronts, self.individuals)
self.cost = 0
def next_step(self):
offspring_inds = self.make_offspring_individuals()
for ind in offspring_inds:
ind.v = self.trim_function(ind.v)
# TODO: remove debug
# plt.scatter([x.v[0] for x in offspring_inds], [x.v[1] for x in offspring_inds], c='b', marker='^')
# plt.show()
self.update_ideal_point(offspring_inds)
# TODO: remove debug
# plt.scatter([x.objectives[0] for x in offspring_inds],
# [x.objectives[1] for x in offspring_inds], c='g', marker='^')
# plt.scatter([self.ideal_point[0]], [self.ideal_point[1]], c='r')
# plt.show()
offspring_inds.extend(self.individuals)
self.normalize(offspring_inds)
# TODO: remove debug
# plt.scatter([x.normalized_objectives[0] for x in offspring_inds],
# [x.normalized_objectives[1] for x in offspring_inds])
# plt.scatter([x[0] for x in self.reference_points], [x[1] for x in self.reference_points], c='k', marker='+')
# plt.show()
self.clustering(offspring_inds)
# TODO: remove debug
# for i, cluster in enumerate(self.clusters):
# c = None
# k = 7
# if i % k == 0:
# c = 'r'
# elif i % k == 1:
# c = 'g'
# elif i % k == 2:
# c = 'b'
# elif i % k == 3:
# c = 'y'
# elif i % k == 4:
# c = 'k'
# elif i % k == 5:
# c = 'c'
# elif i % k == 6:
# c = 'm'
#
# if len(cluster) > 0:
# plt.scatter([x.normalized_objectives[0] for x in cluster],
# [x.normalized_objectives[1] for x in cluster], c=c, marker='s')
# plt.scatter([self.reference_points[i][0]], [self.reference_points[i][1]], c=c, marker='o', s=500)
# plt.scatter([x[0] for x in self.reference_points], [x[1] for x in self.reference_points], c='k', marker='o')
# plt.xlim([-0.2, 1.2])
# plt.ylim([-0.2, 1.2])
# plt.show()
#
# plt.scatter([x.objectives[0] for x in offspring_inds], [x.objectives[1] for x in offspring_inds],
# c='g', s=400)
self.calculate_theta_fitness()
fronts = theta_non_dominated_sort(offspring_inds)
# TODO: remove debug
# for i in fronts.keys():
# front = fronts[i]
# c = None
# k = 7
# if i == 1:
# c = 'r'
# elif i == 2:
# c = 'g'
# elif i == 3:
# c = 'b'
# elif i == 4:
# c = 'y'
# elif i == 5:
# c = 'm'
# elif i == 6:
# c = 'c'
# else:
# c = 'k'
#
# plt.scatter([x.normalized_objectives[0] for x in front], [x.normalized_objectives[1] for x in front],
# c=c, marker='s', s=100)
# plt.xlim([-0.2, 1.2])
# plt.ylim([-0.2, 1.2])
# plt.show()
self.create_final_population(fronts)
return fronts
def make_offspring_individuals(self):
offspring_inds = []
for _ in range(int(self.population_size / 2)):
parent_a = random.choice(self.individuals)
parent_b = random.choice(self.individuals)
child_a, child_b = simulated_binary_crossover(parent_a, parent_b, self.dims,
crossover_rate=self.crossover_rate, eta=self.eta_crossover)
polynomial_mutation(child_a, self.dims, mutation_rate=self.mutation_rate, eta=self.eta_mutation)
polynomial_mutation(child_b, self.dims, mutation_rate=self.mutation_rate, eta=self.eta_mutation)
offspring_inds.append(child_a)
offspring_inds.append(child_b)
return offspring_inds
def normalize(self, individuals):
defiled_point = [float('-inf') for _ in range(self.objective_no)]
for ind in individuals:
for i, objective in enumerate(ind.objectives):
if defiled_point[i] < objective:
defiled_point[i] = objective
# TODO: remove debug
# plt.scatter([defiled_point[0]], [defiled_point[1]], c='r')
# plt.show()
for ind in individuals:
ind.normalized_objectives = numpy.array(
[(obj - self.ideal_point[i]) / (defiled_point[i] - self.ideal_point[i] + EPSILON)
for i, obj in enumerate(ind.objectives)])
def clustering(self, individuals):
self.clusters = [[] for _ in self.reference_points]
for ind in individuals:
min_rejection = float('inf')
min_i = -1
for i, reference_point in enumerate(self.reference_points):
rejection = scalar_rejection(ind, reference_point, self.reference_point_lengths[i])
if rejection < min_rejection:
min_rejection = rejection
min_i = i
self.clusters[min_i].append(ind)
ind.cluster = min_i
ind.rejection = min_rejection
def calculate_theta_fitness(self):
for i, cluster in enumerate(self.clusters):
for ind in cluster:
ind.theta_fitness = scalar_projection(ind, self.reference_points[i],
self.reference_point_lengths[i]) + self.theta * ind.rejection
def create_final_population(self, fronts):
new_inds = []
new_size = len(new_inds)
i = 1
while new_size + len(fronts[i]) <= self.population_size:
new_inds.extend(fronts[i])
new_size = len(new_inds)
i += 1
new_inds.extend(random.sample(fronts[i], self.population_size - len(new_inds)))
# TODO remove debug
# for front in fronts.values():
# plt.scatter([x.v[0] for x in front], [x.v[1] for x in front], c='k', s=100)
# plt.scatter([x.v[0] for x in new_inds], [x.v[1] for x in new_inds], c='r', s=50)
# plt.xlim(-12, 12)
# plt.ylim(-12, 12)
# plt.show()
#
# for front in fronts.values():
# plt.scatter([x.objectives[0] for x in front], [x.objectives[1] for x in front], c='k', s=100)
# plt.scatter([x.objectives[0] for x in new_inds], [x.objectives[1] for x in new_inds], c='r', s=50)
# plt.xlim(-15., 5.)
# plt.ylim(-15., 25.)
# plt.show()
self.individuals = new_inds
def finish(self):
return [x.v for x in self.individuals]
class Individual:
def __init__(self, vector):
self.v = vector
self.objectives = None
def theta_non_dominated_sort(individuals):
dominated_by = collections.defaultdict(set)
how_many_dominates = collections.defaultdict(int)
nsga_rank = collections.defaultdict(int)
front = collections.defaultdict(list)
for x in individuals:
for y in individuals:
if dominates(x, y):
dominated_by[x].add(y)
elif dominates(y, x):
how_many_dominates[x] += 1
if how_many_dominates[x] == 0:
nsga_rank[x] = 1
front[1].append(x)
front_no = 1
while not len(front[front_no]) == 0:
for x in front[front_no]:
for y in dominated_by[x]:
how_many_dominates[y] -= 1
if how_many_dominates[y] == 0:
nsga_rank[y] = front_no + 1
front[front_no + 1].append(y)
front_no += 1
return front
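# --- Added sketch (not part of the original algorithm) ---------------------
# theta_non_dominated_sort uses theta-dominance: x dominates y only when both
# lie in the same cluster and x has the lower theta fitness, so front 1 holds
# the best individual of every cluster. Toy illustration:
#
# >>> a, b, c = Individual([0]), Individual([1]), Individual([2])
# >>> a.cluster, a.theta_fitness = 0, 1.0   # best of cluster 0
# >>> b.cluster, b.theta_fitness = 0, 2.0   # theta-dominated by a
# >>> c.cluster, c.theta_fitness = 1, 5.0   # alone in cluster 1
# >>> fronts = theta_non_dominated_sort([a, b, c])
# >>> fronts[1] == [a, c] and fronts[2] == [b]   # True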
def dominates(x, y):
if x.cluster == y.cluster and x.theta_fitness < y.theta_fitness:
return True
else:
return False
def generate_reference_point(objective_no):
reference_point = []
coord_sum = 0.0
for i in range(1, objective_no + 1):
if i < objective_no:
rand = 0.0
while rand == 0.0:
rand = random.random()
coordinate = (1.0 - coord_sum) * (1.0 - math.pow(rand, 1.0 / (objective_no - i)))
coord_sum += coordinate
reference_point.append(coordinate)
else:
reference_point.append(1.0 - coord_sum)
return numpy.array(reference_point)
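# --- Added sketch (not part of the original algorithm) ---------------------
# Each reference point is a random point on the unit simplex: coordinates are
# non-negative and sum to one. A quick sanity check:
#
# >>> pts = [generate_reference_point(3) for _ in range(100)]
# >>> all(abs(sum(p) - 1.0) < 1e-9 and min(p) >= 0.0 for p in pts)   # True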
def scalar_projection(ind, reference_point, reference_point_length):
return numpy.dot(ind.normalized_objectives, reference_point) / reference_point_length
def scalar_rejection(ind, reference_point, reference_point_length):
scalar_projection_value = scalar_projection(ind, reference_point, reference_point_length)
return numpy.linalg.norm(
ind.normalized_objectives - ((scalar_projection_value / reference_point_length) * reference_point))
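# --- Added sketch (not part of the original algorithm) ---------------------
# scalar_projection is the length of an individual's normalized objective
# vector along a reference direction; scalar_rejection is its perpendicular
# distance to that line. For objectives (3, 4) and reference point (1, 0):
#
# >>> ind = Individual([0.0, 0.0])
# >>> ind.normalized_objectives = numpy.array([3.0, 4.0])
# >>> ref = numpy.array([1.0, 0.0])
# >>> scalar_projection(ind, ref, 1.0), scalar_rejection(ind, ref, 1.0)
# (3.0, 4.0)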
def simulated_binary_crossover(parent_a, parent_b, dims, crossover_rate=1.0, eta=30.0):
child_a = Individual([x for x in parent_a.v])
child_b = Individual([x for x in parent_b.v])
if random.random() > crossover_rate:
child_a.objectives = [x for x in parent_a.objectives]
child_b.objectives = [x for x in parent_b.objectives]
return child_a, child_b
for i, dim in enumerate(dims):
if random.random() > 0.5:
continue
if math.fabs(parent_a.v[i] - parent_b.v[i]) <= EPSILON:
continue
y1 = min(parent_a.v[i], parent_b.v[i])
y2 = max(parent_a.v[i], parent_b.v[i])
lb, ub = dim
rand = random.random()
# child a
beta = 1.0 + (2.0 * (y1 - lb) / (y2 - y1 + EPSILON))
alpha = 2.0 - pow(beta, -(eta + 1.0))
beta_q = get_beta_q(rand, alpha, eta)
child_a.v[i] = 0.5 * ((y1 + y2) - beta_q * (y2 - y1))
# child b
beta = 1.0 + (2.0 * (ub - y2) / (y2 - y1 + EPSILON))
alpha = 2.0 - pow(beta, -(eta + 1.0))
beta_q = get_beta_q(rand, alpha, eta)
child_b.v[i] = 0.5 * ((y1 + y2) + beta_q * (y2 - y1))
# boundary checking
child_a.v[i] = min(ub, max(lb, child_a.v[i]))
child_b.v[i] = min(ub, max(lb, child_b.v[i]))
if random.random() > 0.5:
temp = child_a.v[i]
child_a.v[i] = child_b.v[i]
child_b.v[i] = temp
return child_a, child_b
def get_beta_q(rand, alpha, eta):
if rand <= (1.0 / alpha):
beta_q = pow((rand * alpha), (1.0 / (eta + 1.0)))
else:
beta_q = pow((1.0 / (2.0 - rand * alpha)), (1.0 / (eta + 1.0)))
return beta_q
def polynomial_mutation(ind, dims, mutation_rate=0.0, eta=20.0):
for i, dim in enumerate(dims):
if random.random() > mutation_rate:
continue
y = ind.v[i]
lb, ub = dim
delta1 = (y - lb) / (ub - lb + EPSILON)
delta2 = (ub - y) / (ub - lb + EPSILON)
mut_pow = 1.0 / (eta + 1.0)
rnd = random.random()
if rnd <= 0.5:
xy = 1.0 - delta1
val = 2.0 * rnd + (1.0 - 2.0 * rnd) * (pow(xy, (eta + 1.0)))
delta_q = pow(val, mut_pow) - 1.0
else:
xy = 1.0 - delta2
val = 2.0 * (1.0 - rnd) + 2.0 * (rnd - 0.5) * (pow(xy, (eta + 1.0)))
delta_q = 1.0 - (pow(val, mut_pow))
y += delta_q * (ub - lb)
y = min(ub, max(lb, y))
ind.v[i] = y
ind.objectives = None
if __name__ == '__main__':
sample_dims = [(-100.0, 100.0), (-100.0, 100.0)]
mutatedX = []
mutatedY = []
for _ in range(100):
to_mut = Individual([0.0, 0.0])
polynomial_mutation(to_mut, sample_dims, 0.9, 300.0)
mutatedX.append(to_mut.v[0])
mutatedY.append(to_mut.v[1])
plt.scatter(mutatedX, mutatedY)
plt.xlim(-100.0, 100.0)
plt.ylim(-100.0, 100.0)
plt.show()
# crossX = []
# crossY = []
# for _ in range(10000):
# to_crossA = Individual([-10.0, -10.0])
# to_crossB = Individual([10.0, 10.0])
# newA, newB = simulated_binary_crossover(to_crossA, to_crossB, dims, 1.0, eta=150.0)
# crossX.append(newA.v[0])
# crossY.append(newA.v[1])
# crossX.append(newB.v[0])
# crossY.append(newB.v[1])
# plt.scatter(crossX, crossY)
# plt.xlim(-100.0, 100.0)
# plt.ylim(-100.0, 100.0)
#
# plt.show()
# objectives = [lambda x: -10 * math.exp(-0.2 * math.sqrt(x[0] * x[0] + x[1] * x[1])),
# lambda x: math.pow(abs(x[0]), 0.8) + 5 * math.pow(math.sin(x[0]), 3)
# + math.pow(abs(x[1]), 0.8) + 5 * math.pow(math.sin(x[1]), 3)]
# dimensions = [(-10, 10), (-10, 10)]
# my_individuals = [[random.uniform(-10, 10), random.uniform(-10, 10)] for _ in range(250)]
#
# my_pop = NSGAIII(my_individuals, dimensions, objectives, theta=0)
# # population.steps(range(100))
#
# for j in range(100):
# my_pop.next_step()
# print(j)
#
# effect = my_pop.finish()
# X = [my_pop.objectives[0](x) for x in effect]
# Y = [my_pop.objectives[1](x) for x in effect]
# plt.scatter(X, Y)
# # pylab.xlim(-10.,250.)
# # pylab.ylim(-10.,250.)
# plt.xlim(-15., 5.)
# plt.ylim(-15., 25.)
#
# file = ""
# if j < 10:
# file += "0"
# plt.savefig("pictures//" + file + str(j) + ".png")
# plt.clf()
| gpl-3.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/testing/jpl_units/UnitDblConverter.py | 6 | 5452 | #===========================================================================
#
# UnitDblConverter
#
#===========================================================================
"""UnitDblConverter module containing class UnitDblConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
import matplotlib.projections.polar as polar
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblConverter' ]
#===========================================================================
# A special function for use with the matplotlib FuncFormatter class
# for formatting axes with radian units.
# This was copied from matplotlib example code.
def rad_fn(x, pos = None ):
"""Radian function formatter."""
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return str(x)
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
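# Usage sketch (an addition, not in the original module): rad_fn is meant to
# be wrapped in a FuncFormatter so radian tick values render as multiples of
# pi/2, e.g.:
#   axis.set_major_formatter( ticker.FuncFormatter( rad_fn ) )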
#===========================================================================
class UnitDblConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for the Monte UnitDbl class.
"""
# default for plotting
defaults = {
"distance" : 'km',
"angle" : 'deg',
"time" : 'sec',
}
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has Epoch data.
= INPUT VARIABLES
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Check to see if the value used for units is a string unit value
# or an actual instance of a UnitDbl so that we can use the unit
# value for the default axis label value.
if ( unit ):
if ( isinstance( unit, str ) ):
label = unit
else:
label = unit.label()
else:
label = None
if ( label == "deg" ) and isinstance( axis.axes, polar.PolarAxes ):
# If we want degrees for a polar plot, use the PolarPlotFormatter
majfmt = polar.PolarAxes.ThetaFormatter()
else:
majfmt = U.UnitDblFormatter( useOffset = False )
return units.AxisInfo( majfmt = majfmt, label = label )
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
      isNotUnitDbl = True
      # If we get a sequence, convert each element; an empty sequence just
      # returns an empty list.
      if ( iterable(value) and not isinstance(value, str) ):
         if ( len(value) == 0 ):
            return []
         else:
            return [ UnitDblConverter.convert( x, unit, axis ) for x in value ]
      # Check whether the incoming value is actually a UnitDbl and set a flag.
      if ( isinstance(value, U.UnitDbl) ):
         isNotUnitDbl = False
# If the incoming value behaves like a number, but is not a UnitDbl,
# then just return it because we don't know how to convert it
# (or it is already converted)
if ( isNotUnitDbl and units.ConversionInterface.is_numlike( value ) ):
return value
# If no units were specified, then get the default units to use.
      if ( unit is None ):
unit = UnitDblConverter.default_units( value, axis )
# Convert the incoming UnitDbl value/values to float/floats
if isinstance( axis.axes, polar.PolarAxes ) and (value.type() == "angle"):
# Guarantee that units are radians for polar plots.
return value.convert( "rad" )
return value.convert( unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# Determine the default units based on the user preferences set for
# default units when printing a UnitDbl.
if ( iterable(value) and not isinstance(value, str) ):
return UnitDblConverter.default_units( value[0], axis )
else:
return UnitDblConverter.defaults[ value.type() ]
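# Registration sketch (an addition; the real wiring lives in the jpl_units
# test package itself): a converter instance is typically installed in
# matplotlib's units registry so UnitDbl values plot transparently, e.g.:
#   units.registry[ U.UnitDbl ] = UnitDblConverter()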
| mit |
jepegit/cellpy | cellpy/readers/instruments/ext_nda_reader.py | 1 | 3805 | import os
import logging
import pandas as pd
from cellpy.readers.core import (
FileID,
Cell,
check64bit,
humanize_bytes,
xldate_as_datetime,
)
from cellpy.parameters.internal_settings import get_headers_normal
from cellpy.readers.instruments.mixin import Loader
from cellpy import prms
try:
from nda_reader import nda_reader
except ImportError:
    logging.warning("Could not load nda reader")
# import nda_reader
# read_nda
# fix headers etc
# check
def load_nda(*args, **kwargs):
    # Mock: returns an empty (length, DataFrame) pair so that loader() below
    # can unpack the result (assumed placeholder behavior).
    print("dummy function (mock)")
    print(args)
    print(kwargs)
    return 0, pd.DataFrame()
class NdaLoader(Loader):
""" Class for using the NDA loader by Frederik Huld (Beyonder)."""
def __init__(self):
"""initiates the NdaLoader class"""
# could use __init__(self, cellpydata_object) and
# set self.logger = cellpydata_object.logger etc.
# then remember to include that as prm in "out of class" functions
# self.prms = prms
self.logger = logging.getLogger(__name__)
self.headers_normal = get_headers_normal()
self.headers_global = self.get_headers_global()
self.current_chunk = 0 # use this to set chunks to load
def get_raw_units(self):
"""Include the settings for the units used by the instrument.
The units are defined w.r.t. the SI units ('unit-fractions'; currently only units that are multiples of
Si units can be used). For example, for current defined in mA, the value for the
current unit-fraction will be 0.001.
Returns: dictionary containing the unit-fractions for current, charge, and mass
"""
raise NotImplementedError
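        # A concrete implementation might return unit fractions like the
        # following (a sketch with assumed values, not verified for nda files):
        #     return {"current": 0.001, "charge": 0.001, "mass": 0.001}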
def get_raw_limits(self):
"""Include the settings for how to decide what kind of step you are examining here.
The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current is stable (constant) and non-zero).
It is expected that different instruments (with different resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict)
"""
raise NotImplementedError
def loader(self, file_name, *args, **kwargs):
"""Loads data into a DataSet object and returns it"""
new_tests = []
test_no = 1
channel_index = 1
channel_number = 1
creator = "no name"
item_ID = 1
schedule_file_name = "no name"
start_datetime = "2020.02.24 14:58:00"
test_ID = 1
test_name = "no name"
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
self.logger.debug("in loader")
self.logger.debug("filename: %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.debug(txt)
data = Cell()
data.cell_no = test_no
data.loaded_from = file_name
fid = FileID(file_name)
data.channel_index = channel_index
data.channel_number = channel_number
data.creator = creator
data.item_ID = item_ID
data.schedule_file_name = schedule_file_name
data.start_datetime = start_datetime
data.test_ID = test_ID
data.test_name = test_name
data.raw_data_files.append(fid)
length_of_test, normal_df = load_nda()
        data.summary = pd.DataFrame()  # empty summary (original referenced an undefined `empty_df`)
data.raw = normal_df
data.raw_data_files_length.append(length_of_test)
data = self._post_process(data)
data = self.identify_last_data_point(data)
new_tests.append(data)
return new_tests
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/doc/sphinxext/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
    def _str_signature(self):
        # Returns immediately: the signature formatting below is unreachable
        # (signature rendering is disabled).
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit |
Averroes/statsmodels | statsmodels/examples/ex_emplike_1.py | 34 | 3682 | """
This is a basic tutorial on how to conduct basic empirical likelihood
inference for descriptive statistics. If matplotlib is installed
it also generates plots.
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
print('Welcome to El')
np.random.seed(634) # No significance of the seed.
# Let's first generate some univariate data.
univariate = np.random.standard_normal(30)
# Now let's play with it
# Initiate an empirical likelihood descriptive statistics instance
eldescriptive = sm.emplike.DescStat(univariate)
# Empirical likelihood is (typically) a method of inference,
# not estimation. Therefore, there is no attribute eldescriptive.mean
# However, we can check the mean:
eldescriptive_mean = eldescriptive.endog.mean() #.42
#Let's conduct a hypothesis test to see if the mean is 0
print('Hypothesis test results for the mean:')
print(eldescriptive.test_mean(0))
# The first value is is -2 *log-likelihood ratio, which is distributed
#chi2. The second value is the p-value.
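# For instance, a -2*log-likelihood ratio of about 3.84 sits right at the 5%
# critical value of the chi2(1) distribution, giving a p-value near 0.05.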
# Let's see what the variance is:
eldescriptive_var = eldescriptive.endog.var() # 1.01
#Let's test if the variance is 1:
print('Hypothesis test results for the variance:')
print(eldescriptive.test_var(1))
# Let's test if Skewness and Kurtosis are 0
print('Hypothesis test results for Skewness:')
print(eldescriptive.test_skew(0))
print('Hypothesis test results for the Kurtosis:')
print(eldescriptive.test_kurt(0))
# Note that the skewness and Kurtosis take longer. This is because
# we have to optimize over the nuisance parameters (mean, variance).
# We can also test for the joint skewness and kurtoses
print(' Joint Skewness-Kurtosis test')
print(eldescriptive.test_joint_skew_kurt(0, 0))
# Let's try and get some confidence intervals
print('Confidence interval for the mean')
print(eldescriptive.ci_mean())
print('Confidence interval for the variance')
print(eldescriptive.ci_var())
print('Confidence interval for skewness')
print(eldescriptive.ci_skew())
print('Confidence interval for kurtosis')
print(eldescriptive.ci_kurt())
# if matplotlib is installed, we can get a contour plot for the mean
# and variance.
mean_variance_contour = eldescriptive.plot_contour(-.5, 1.2, .2, 2.5, .05, .05)
# This returns a figure instance. Just type mean_var_contour.show()
# to see the plot.
# Once you close the plot, we can start some multivariate analysis.
x1 = np.random.exponential(2, (30, 1))
x2 = 2 * x1 + np.random.chisquare(4, (30, 1))
mv_data = np.concatenate((x1, x2), axis=1)
mv_elmodel = sm.emplike.DescStat(mv_data)
# For multivariate data, the only methods are mv_test_mean,
# mv mean contour and ci_corr and test_corr.
# Let's test the hypothesis that x1 has a mean of 2 and x2 has a mean of 7
print('Multivariate mean hypothesis test')
print(mv_elmodel.mv_test_mean(np.array([2, 7])))
# Now let's get the confidence interval for correlation
print('Correlation Coefficient CI')
print(mv_elmodel.ci_corr())
# Note how this took much longer than previous functions. That is
# because the function is optimizing over 4 nuisance parameters.
# We can also do a hypothesis test for correlation
print('Hypothesis test for correlation')
print(mv_elmodel.test_corr(.7))
# Finally, let's create a contour plot for the means of the data
means_contour = mv_elmodel.mv_mean_contour(1, 3, 6,9, .15,.15, plot_dta=1)
# This also returns a fig so we can type mean_contour.show() to see the figure
# Sometimes, the data is very dispersed and we would like to see the confidence
# intervals without the plotted data. Let's see the difference when we set
# plot_dta=0
means_contour2 = mv_elmodel.mv_mean_contour(1, 3, 6,9, .05,.05, plot_dta=0)
| bsd-3-clause |
hsu/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
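        # completion_mask is a byte array: bit j of byte i marks geodesic
        # section (i * 8 + j) as hit, so each set bit gets the filled colour.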
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
| gpl-3.0 |
jEschweiler/Urease | urease_software/integrated_restraints/sampler.py | 1 | 17978 | import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.pmi
import sys
import os.path
import IMP.display
import numpy as np
import math
import matplotlib.pyplot as plt
import time
colors = [(1,.4,.4),(.4,.4,1),(.4,1,.4),(1,.4,1),(.4,1,1),(1,.7,.4),(1,.4,.7)]
class MassSpecSystem:
def __init__(self,model,fname):
self.model = model
self.fname = fname
self.chains = []
self.ccs = []
self.radii = []
self.ccs_radii = []
self.distances = []
self.raw_connect = []
self.node_labels = []
self.node_structure = []
self.composite = []
self.distance_restraints = []
self.connectivity_restraint = []
self.nparticles = 0
self.nrestraints = 0
self.size = 0
self.ds = []
self.idx = []
self.rigid_bodies = []
self.ptypes = []
self.restraints = []
self.max_score = 100
self.distance_force_constant = 10
self.connectivity_force_constant = 10
self.refcoords = []
self.sympairs = [(0,1),(0,3),(1,3)]
self.symres = []
def convert_node_names_node_indices(self, child):
child_index = []
for i in range(len(child)):
child_index.append(self.chains.index(child[i]))
return child_index
def get_node(self, node):
tmp = node.split("=")[1].split(" ")
tmp = list(filter(None,tmp))
        if len(tmp) != 1:
#print(list(tmp)[0])
child1 = tmp[0].replace(","," ").strip().replace("["," ").replace("]"," ").strip().split()
child1 = self.convert_node_names_node_indices(child1)
parent = tmp[1].strip()
        if len(tmp) == 1:
#print(tmp[0])
child1 = tmp[0].replace(","," ").strip().replace("["," ").replace("]"," ").strip().split()
child1 = self.convert_node_names_node_indices(child1)
parent = None
return [child1, parent]
def parse_raw_connectivity(self):
node_labels = []
node_structure = []
for i in range(len(self.raw_connect)):
node_labels.append(self.raw_connect[i].split("=")[0].strip())
node_structure.append(self.get_node(self.raw_connect[i]))
if (len(node_labels) == len(node_structure)):
self.node_labels = node_labels
self.node_structure = node_structure
else:
#print("error while reading tree")
sys.exit(0)
def read_restraints(self):
refcoords = []
if not os.path.exists(self.fname):
#print("%s does not exist"%(self.fname))
sys.exit(0)
else:
f = open(self.fname, 'r')
lines = f.readlines()
counter = 0
for line in lines:
if not line.startswith('##CCS'):
if not line.startswith('##DISTANCE'):
if not line.startswith('##CONNECT'):
if not line.startswith('##REF'):
                                if (counter == 0):
                                    self.ccs_radii.append(line)
                                if (counter == 1):
                                    self.distances.append(line)
                                if (counter == 2):
                                    self.raw_connect.append(line)
                                if (counter == 3):
                                    refcoords.append(line)
else:
counter = 3
else:
counter = 2
else:
counter = 1
f.close()
# loop over
for i in range(len(self.ccs_radii)):
            if i == 0:
self.composite.append(self.ccs_radii[i].split(",")[0].strip())
self.composite.append(self.ccs_radii[i].split(",")[1].strip())
self.composite.append(self.ccs_radii[i].split(",")[2].strip())
else:
chain = self.ccs_radii[i].replace('"','').split(",")[0].strip()
                if (len(chain) == 1):
self.chains.append(chain)
self.ccs.append(self.ccs_radii[i].split(",")[1].strip())
self.radii.append(self.ccs_radii[i].split(",")[2].strip())
self.nparticles = len(self.chains)
self.nrestraints = len(self.distances)
self.size = float(self.composite[2])
#print("SELFSIZE",self.size)
# get connectivity information from restraint file
self.parse_raw_connectivity()
#print(self.refcoords)
for item in refcoords:
            for ch in "'[]()\n":  # strip quoting chars (str.translate was misused here)
                item = item.replace(ch, "")
item = item.split(":")[1]
item = item.replace("[","").replace("]","")
F = np.fromstring(item,sep = " ")
self.refcoords.append(F)
def setup_system(self):
self.bb = IMP.algebra.BoundingBox3D(IMP.algebra.Vector3D(0,0,0),(IMP.algebra.Vector3D(self.size*2, self.size*2, self.size*2)))
#print(self.bb)
self.ps = [IMP.Particle(self.model) for x in range(self.nparticles)]
self.idx = self.model.get_particle_indexes()
self.rs = [IMP.pmi.Resolution.setup_particle(self.model, x, 300) for x in self.idx]
for i in range(self.nparticles):
init_coors = IMP.algebra.get_random_vector_in(self.bb)
#print(init_coors)
self.ds.append(IMP.core.XYZR.setup_particle(self.model, self.idx[i], IMP.algebra.Sphere3D(IMP.algebra.Vector3D(init_coors[0], init_coors[1], init_coors[2]), float(self.radii[i]))))
#print(self.ds)
self.rigid_bodies.append(IMP.core.RigidBody.setup_particle(self.model, self.ds[i],IMP.algebra.ReferenceFrame3D()))
#print("149")
self.rigid_bodies[i].set_coordinates(IMP.algebra.Vector3D(init_coors[0], init_coors[1], init_coors[2]))
#print("151")
self.rigid_bodies[i].set_coordinates_are_optimized(True)
#print("153")
IMP.atom.Mass.setup_particle(self.model, self.idx[i], 1.0)
#print("155")
    def setup_symmetry_restraint(self):
        # Enforce three-fold (C3) symmetry: particles 8-15 and 16-23 are
        # constrained to be copies of reference particles 0-7, rotated by 120
        # and 240 degrees about the z axis. The eight near-identical blocks of
        # the original are folded into one loop; 2.0 / 3.0 is written with
        # floats to avoid integer division under Python 2.
        for base in range(8):
            for i, sp in enumerate([self.ds[base + 8], self.ds[base + 16]]):
                IMP.core.Reference.setup_particle(sp, self.ds[base])
                tr = IMP.algebra.Transformation3D(
                    IMP.algebra.get_rotation_about_axis(
                        IMP.algebra.get_basis_vector_3d(2),
                        np.pi * (2.0 / 3.0) * (i + 1)),
                    IMP.algebra.Vector3D(0, 0, 0))
                sm = IMP.core.TransformationSymmetry(tr)
                c = IMP.core.SingletonConstraint(sm, None, sp)
                self.symres.append(c)
        #print("SELF.SYMRES ", self.symres)
def setup_distance_restraints(self, d_res = 0):
tmp_distance_restraints = self.distances[d_res]
        for ch in "'[]()":  # strip quoting chars (str.translate was misused here)
            tmp_distance_restraints = tmp_distance_restraints.replace(ch, "")
tmp_distance_restraints = tmp_distance_restraints.replace("\n","").split(";")
tmp_distance_restraints = list(filter(None, tmp_distance_restraints))
n_distance_restraints = len(tmp_distance_restraints)
for i in range(n_distance_restraints):
distance_restraint = tmp_distance_restraints[i].replace(",","").replace("[","").replace("]","").replace("'",'').split()
distance_restraint = list(filter(None, distance_restraint))
#dref.append(",".join(distance_restraint))
if distance_restraint:
pi = int(self.chains.index(distance_restraint[0]))
pj = int(self.chains.index(distance_restraint[1]))
rij = float(distance_restraint[2].replace("]",""))
rij = rij - float(self.radii[pi]) - float(self.radii[pj])
#print(rij)
self.distance_restraints.append(IMP.atom.create_distance_restraint(self.rigid_bodies[pi], self.rigid_bodies[pj], rij, self.distance_force_constant))
def setup_connectivity_restraints(self):
# Create MS connectivity restraint
#hw = IMP.core.HarmonicWell((-18.25,27.5), self.connectivity_force_constant)
#hw = IMP.core.HarmonicWell((16,58), 100)# range for ALL protein complexes
hw = IMP.core.HarmonicWell((20,46), 100)
ss = IMP.core.DistancePairScore(hw)
self.connectivity_restraint = IMP.core.MSConnectivityRestraint(self.model, ss)
self.connectivity_restraint.set_maximum_score(0)
# Connectivity taken from the composite information -- set chain ID -- read in from the restraint file
#print("COMPOSITE ", self.composite)
for i in range(len(self.composite[0])):
index_from_chain = self.chains.index(self.composite[0][i])
self.ptypes.append(self.connectivity_restraint.add_type([self.ds[index_from_chain]]))
#print(self.ds[index_from_chain].get_radius())
# Connectivity taken from the composite information -- set chain ID -- read in from the restraint file
for i in range(len(self.node_labels)):
node = self.node_structure[i]
node_label = self.node_structure[i][0]
node_parent = self.node_structure[i][1]
if node_parent is None:
self.connectivity_restraint.add_composite(node_label)
else:
self.connectivity_restraint.add_composite(node_label, self.node_labels.index(node_parent))
def collect_restraints(self):
restraints = []
for i in self.distance_restraints:
restraints.append(i)
restraints.append(self.connectivity_restraint)
self.restraints = restraints
def setup_restraints(self, d_res = 0):
#print("SETUP RESTRAINTS")
self.distance_restraints = []
self.connectivity_restraint = []
self.setup_distance_restraints(d_res = d_res)
self.setup_symmetry_restraint()
self.setup_connectivity_restraints()
self.collect_restraints()
#print(self.connectivity_restraint.get_connected_pairs())
#print(self.connectivity_restraint.get_pair_score())
class MassSpecDynamics:
#'MC sampling options class'
def __init__(self, system, scoring_function, initial_temperature = 1000, final_temperature = 100, mc_cool_cycles = 500, mc_cool_steps = 5000, mc_cycles = 1000, mc_steps = 1000, optimization_cycles = 10):
self.system = system
self.scoring_function = scoring_function
self.initial_temperature = initial_temperature
self.final_temperature = final_temperature
self.mc_cool_cycles = mc_cool_cycles
self.mc_cool_steps = mc_cool_steps
self.mc_cycles = mc_cycles
self.mc_steps = mc_steps
self.optimization_cycles = optimization_cycles
self.movers = []
self.optimizer = IMP.core.MonteCarlo(self.system.model)
self.print_annealing = True
def get_coordinates_xyz(self, header, output):
#""" writes out coordinates in xyz format
#"""
output.write("%s\n" % len(self.system.ds))
output.write("%s\n" % header)
for index, particle in enumerate(self.system.ds):
outi = "C"+str(index)+" "+str(particle.get_coordinates()[0])+" "+str(particle.get_coordinates()[1])+" "+str(particle.get_coordinates()[2])
output.write("%s\n" % outi)
def writepym(self, fname):
w = open(fname, 'w')
w.write('from pymol.cgo import *'+ '\n')
w.write('from pymol import cmd'+ '\n')
w.write('from pymol.vfont import plain' + '\n' + 'data={}' + '\n' + "curdata=[]" + '\n')
for index, particle in enumerate(self.system.ds):
w.write("k='Protein" + str(index) + " geometry'" +'\n'+ "if not k in data.keys():" +'\n'+" data[k]=[]"+'\n'+'curdata=['+'\n'+'COLOR,' + str(colors[index][0])+","+str(colors[index][1])+","+ str(colors[index][2])+"," + '\n' + 'SPHERE,'+ str(particle.get_coordinates()[0])+ ','+ str(particle.get_coordinates()[1])+',' + str(particle.get_coordinates()[2])+','+ str(particle.get_radius()) +'\n')
w.write("]"+"\n"+"k='Protein" + str(index) + " geometry'" + '\n' + "if k in data.keys():" + "\n" + " data[k]= data[k]+curdata"+'\n'+"else:" +'\n' +" data[k]= curdata"+"\n")
w.write("for k in data.keys():" + "\n" + " cmd.load_cgo(data[k], k, 1)" +"\n"+ "data= {}")
w.close()
def initialize_MC(self):
#""" initialize MC optimizer
#"""
# Initialize Monte Carlo sampler
self.optimizer.set_return_best(True)
self.optimizer.set_score_threshold(self.system.max_score*2)
self.optimizer.set_scoring_function(self.scoring_function)
self.movers = []
self.print_annealing = False
# Accumulate movers that are need the Monte Carlo sampler
for rbd in self.system.rigid_bodies:
self.movers.append(IMP.core.RigidBodyMover(rbd, 1, 2))
# Add movers to the Monte Carlo sampler
self.optimizer.add_movers(self.movers)
def run_MC(self, i,name):
y = 0
#""" optimizies scoring function using Monte Carlo sampling
#"""
# Setup MC optimizer
self.initialize_MC()
self.optimizer.set_return_best(False)
coords = []
scores = []
for mc in range(self.optimization_cycles): ## RANDOMIZE COORDINATES
for particle in self.system.ds:
init_coors = IMP.algebra.get_random_vector_in(self.system.bb)
particle.set_coordinates(init_coors)
if y != 0:
                # loop variable renamed from `i`: it otherwise clobbered the
                # run index argument used in the output file names below
                for mover_idx in [0, 1, 2, 3, 5, 8, 9, 10, 11, 13, 16, 17, 18, 19, 21]:
                    #self.system.ds[mover_idx].set_coordinates_are_optimized(False)
                    self.optimizer.add_mover(self.movers[mover_idx])
cycle = "annealing"
T = self.initial_temperature
print("INIT TEMP", T)
start = time.time()
score = []
temp = []
#for cc in range(self.mc_cool_cycles*7):
print(self.optimizer.get_movers())
for cc in range(self.mc_cool_cycles*10):
#for cc in range(500): ## DO THE ANNEALING MC
T = 0.999*T
self.optimizer.set_kt(T)
self.optimizer.optimize(self.mc_cool_steps)
score.append(self.scoring_function.evaluate(False))
temp.append(T)
#plt.clf()
#plt.plot(temp, score)
#plt.ylim([0,100000])
#plt.show()
print("FINAL TEMP,", T)
T = 400
stop = time.time()
print("annealing time", stop - start)
print(y)
#for particle in self.system.ds[np.array([0,1,2,3])]:
            for mover_idx in [0, 1, 2, 3, 5, 8, 9, 10, 11, 13, 16, 17, 18, 19, 21]:
                #self.system.ds[mover_idx].set_coordinates_are_optimized(False)
                self.optimizer.remove_mover(self.movers[mover_idx])
#init_coors = IMP.algebra.get_random_vector_in(self.system.bb)
#particle.set_coordinates_are_optimized(False)
print(self.optimizer.get_movers())
#for c in range(self.mc_cycles): ## DO THE CONSTANT TEMP MC
start = time.time()
score = []
step = []
for c in range(1000): ## DO THE CONSTANT TEMP MC
self.optimizer.set_kt(T)
#self.optimizer.optimize(self.mc_steps)
self.optimizer.optimize(1000000)
if c%100 == 0: ## WRITE LIST TO AN ARRAY EVERY 100TH CYCLE
score.append(self.scoring_function.evaluate(False))
step.append(c)
y+=1
            coord_list = []  # renamed from `list`/loop `i` to avoid shadowing
            for idx, particle in enumerate(self.system.ds):
                x = particle.get_coordinates()
                coord_list.append(x)
            x = np.array(coord_list)
            if type(coords) is np.ndarray:
                coords = coords.tolist()
            if type(scores) is np.ndarray:
                scores = scores.tolist()
            coords.append(coord_list)
            #print(coord_list)
scores.append(self.scoring_function.evaluate(False))
stop = time.time()
print("MC Time", stop - start)
coords = np.array(coords)
scores = np.array(scores)
#np.save(name.replace(".pdb1_restraints.txt","")+ "_" + str("%02d" % i) + "_" +"coordfile.npy",coords)
np.save("coordfile.npy",coords) ## ALL THE STRUCTURES
np.save(name.replace(".pdb1_restraints.txt","")+ "_" + str("%02d" % i) + "_" +"scorefile.npy",scores)
| gpl-3.0 |
scribble/scribble.github.io | src/main/jbake/assets/docs/lchannels/scripts/plot-benchmark.py | 2 | 2325 | #!/usr/bin/env python
import matplotlib.pyplot as plotlib
import numpy
import sys
DELIMITER = ','
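# Expected input layout (inferred from readTitleAndHeaders/genfromtxt below):
# line 1 holds the benchmark title, line 2 the comma-separated column headers,
# and the remaining lines nanosecond timings, one column per header.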
def makePlot(infile, outfile):
import matplotlib
matplotlib.rcParams.update({'font.size': 12})
(title, headers) = readTitleAndHeaders(infile)
data = numpy.genfromtxt(infile,
delimiter=DELIMITER,
                            skip_header=2, # Skip benchmark title and header row
dtype=numpy.long) / 1000000.0
box_colours = ['ForestGreen', 'SkyBlue', 'Tan', 'Plum', 'ForestGreen', 'Maroon', 'ForestGreen']
locations = range(1, len(headers) + 1)
fig = plotlib.figure()
plot = plotlib.boxplot(data, widths=0.7, notch=True, positions=locations,
patch_artist=True,
sym='') # Do not print outliers
for box, colour in zip(plot['boxes'], box_colours):
plotlib.setp(box, #color='DarkMagenta',
linewidth=1,
facecolor=colour)
# plotlib.setp(plot['whiskers'], color='DarkMagenta', linewidth=1)
# plotlib.setp(plot['caps'], color='DarkMagenta', linewidth=1)
# plotlib.setp(plot['fliers'], color='OrangeRed', marker='o', markersize=3)
# plotlib.setp(plot['medians'], color='OrangeRed', linewidth=1)
plotlib.grid(axis='y', # set y-axis grid lines
linestyle='--', # use dashed lines
which='major', # only major ticks
color='lightgrey', # line colour
alpha=0.8) # make lines semi-translucent
plotlib.xticks(locations, # tick marks
headers, # labels
rotation=25) # rotate the labels
plotlib.ylabel('milliseconds') # y-axis label
plotlib.title(title, fontsize=12, fontweight='bold') # plot title
# plotlib.show() # render the plot
fig.savefig(outfile, bbox_inches='tight')
def readTitleAndHeaders(infile):
f = open(infile)
title = f.readline()
headers = map(lambda x: x.replace(' ', "\n"),
f.readline().strip().split(DELIMITER))
f.close()
return (title, headers)
if (__name__ == '__main__'):
infile = sys.argv[1]
outfile = sys.argv[2]
makePlot(infile, outfile)
| apache-2.0 |
fro391/Investing | ArticleScrape/NewsDate.py | 1 | 3272 | import RSS_URL
from time import strftime, strptime
from datetime import datetime, timedelta
import threading
import timeit
import pandas as pd
#setting lock variable for threading
global lock
lock = threading.Lock()
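# The lock serializes appends to the shared output CSV: each NewsDate worker
# thread formats its rows first, then writes them under the lock.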
def NewsDate (symbol):
url = "http://feeds.finance.yahoo.com/rss/2.0/headline?s="+symbol+"®ion=US&lang=en-US"
#timezone conversion variables
#title of articles
titles = RSS_URL.getURLs(url)
#dates of articles
dates = RSS_URL.getURLs3(url)
toBeWritten = ''
c = -1
#writes symbol, title, and date of news articles
if len(titles)!= len(dates):
pass
elif (len(titles) * len(dates)) == 0:
pass
else:
try:
for i in dates:
#encoding article titles and replacing ","
#using c as index for 'titles' list
c += 1
ArticleTitle = titles[c].encode('ascii','ignore').replace(',','')
#accounting for time zones
utc = datetime.strptime(i,'%a, %d %b %Y %H:%M:%S %z')
local = utc + timedelta(hours=-5)
#converts yahoo's timestamp to YearMonthDate
j = strftime("%Y%m%d",strptime(str(local),"%Y-%m-%d %H:%M:%S"))
#stores to be written variable
toBeWritten += (str(symbol) + j+ ',' + str(ArticleTitle) +'\n')
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print message, symbol
#write variable to file
lock.acquire()
try:
myfile.write(toBeWritten)
finally:
lock.release()
if __name__ == '__main__':
start = timeit.default_timer()
#creating file in local 'data' directory
with open('data\NewsDate'+'.csv', 'w+') as myfile:
myfile.write('Ticker&Date,Title'+'\n')
#Getting all symbols into list
with open("symbols.txt") as symbolfile:
symbolslistR = symbolfile.read()
symbolslist = symbolslistR.split('\n')
#tracks threads running
threadlist = []
#open "myfile" file for SentimentRSS to write in
with open('data\NewsDate'+'.csv', 'a') as myfile:
for u in symbolslist:
t = threading.Thread(target = NewsDate,args=(u,))
t.start()
threadlist.append(t)
            #caps the number of active threads at 50 (busy-wait)
            while threading.activeCount()>50:
a=0
#print threading.activeCount()
#finishes threads before closing file
for b in threadlist:
b.join()
print '# of threads: ' + str(len(threadlist))
#group by output by number of articles per day
ND = pd.read_csv('data\NewsDate'+'.csv').groupby('Ticker&Date').count()
ND.to_csv('data\NewsDate'+'.csv')
#adds the new data to a NewsDate historical file
NDHist = pd.read_csv('data\NewsDateHist'+'.csv',index_col = 0)
NDHist.append(ND).to_csv('data\NewsDateHist'+'.csv')
#use groupby max to remove duplicates
pd.read_csv('data\NewsDateHist'+'.csv').groupby('Ticker&Date').max().to_csv('data\NewsDateHist'+'.csv')
stop = timeit.default_timer()
print stop - start | gpl-2.0 |
eggplantbren/STATS331 | Figures/marginalisation.py | 1 | 1447 | import numpy as np
import numpy.random as rng
import matplotlib.pyplot as plt
plt.rc("font", size=16, family="serif", serif="Computer Sans")
plt.rc("text", usetex=True)
rng.seed(123)
[x, y] = np.meshgrid(np.linspace(-5., 5., 101), np.linspace(-5., 5., 101))
y = y[::-1, :]
plt.figure(figsize=(12, 12))
plt.subplot(2,2,1)
plt.imshow(-np.exp(-0.5*x**2 - 0.5*(y - x)**2/0.5**2), interpolation='nearest',
cmap='gray', extent=[-5, 5, -5, 5])
plt.title('Joint Posterior Distribution')
plt.xlabel('$a$')
plt.ylabel('$b$')
plt.subplot(2,2,2, aspect='equal')
xx = rng.randn(500)
yy = xx + 0.5*rng.randn(500)
plt.plot(xx, yy, 'ko', markersize=2, alpha=0.2)
plt.title('Joint Posterior Distribution')
plt.gca().set_xticks([-4, -2, 0, 2, 4])
plt.gca().set_yticks([-4, -2, 0, 2, 4])
plt.xlim([-5, 5])
plt.ylim([-5, 5])
plt.xlabel('$a$', fontsize=20)
plt.ylabel('$b$', fontsize=20)
plt.subplot(2,2,3)
x = x[0, :]
plt.plot(x, np.exp(-0.5*x**2)/np.sqrt(2*np.pi), 'k', linewidth=2)
plt.xlim([-5, 5])
plt.ylim([0, 0.45])
plt.xlabel('$a$', fontsize=20)
plt.ylabel('Probability Density')
plt.gca().set_yticks([0, 0.2, 0.4])
plt.title('Marginal Posterior Distribution')
plt.subplot(2,2,4)
plt.hist(xx, 30, alpha=0.5)
plt.xlim([-5, 5])
plt.title('Marginal Posterior Distribution')
plt.xlabel('$a$', fontsize=20)
plt.ylabel('Number of Samples')
plt.gca().set_yticks([0, 10, 20, 30, 40, 50])
plt.savefig('marginalisation.pdf', bbox_inches='tight')
#plt.show()
| mit |
khkaminska/bokeh | bokeh/charts/_models.py | 4 | 3865 | from __future__ import absolute_import
from bokeh.properties import (HasProps, String, Either, Float, Color, Instance, List,
Any)
from ._properties import ColumnLabel, Column
from bokeh.models.sources import ColumnDataSource
from bokeh.models.renderers import GlyphRenderer
class CompositeGlyph(HasProps):
"""Represents a subset of data.
A collection of hetero or homogeneous glyph
renderers which represent a subset of data. The
purpose of the composite glyph is to abstract
away the details of constructing glyphs, based on
the details of a subset of data, from the grouping
operations that a generalized builder must implement.
In general, the Builder operates at the full column
oriented data source level, segmenting and assigning
attributes from a large selection, while the composite glyphs
    will typically be passed array-like structures with
    one or more singular attributes to apply.
Another way to explain the concept is that the Builder
operates as the groupby, as in pandas, while the
CompositeGlyph operates as the apply.
What is the responsibility of the Composite Glyph?
- Produce GlyphRenderers
- Apply any aggregations
- Tag the GlyphRenderers with the group label
- Apply transforms due to chart operations
- Operations require implementation of special methods
"""
label = String('All', help='Identifies the subset of data.')
values = Either(Column(Float), Column(String), help='Array-like values.')
color = Color(default='gray')
fill_alpha = Float(default=0.8)
source = Instance(ColumnDataSource)
operations = List(Any)
renderers = List(Instance(GlyphRenderer))
left_buffer = Float(default=0.0)
right_buffer = Float(default=0.0)
top_buffer = Float(default=0.0)
bottom_buffer = Float(default=0.0)
def __init__(self, **kwargs):
label = kwargs.pop('label', None)
if label is not None:
if not isinstance(label, str):
label = str(label)
kwargs['label'] = label
super(CompositeGlyph, self).__init__(**kwargs)
self.setup()
def setup(self):
self.renderers = [renderer for renderer in self.build_renderers()]
if self.renderers is not None:
self.refresh()
def refresh(self):
if self.renderers is not None:
self.source = self.build_source()
self._set_sources()
def build_renderers(self):
        raise NotImplementedError('You must return a list of renderers.')
def build_source(self):
        raise NotImplementedError('You must return a ColumnDataSource.')
def _set_sources(self):
"""Store reference to source in each glyph renderer."""
for renderer in self.renderers:
renderer.data_source = self.source
def __stack__(self, glyphs):
pass
def __jitter__(self, glyphs):
pass
def __dodge__(self, glyphs):
pass
def __overlay__(self, glyphs):
pass
def apply_operations(self):
pass
class CollisionModifier(HasProps):
renderers = List(Instance(CompositeGlyph))
name = String()
method_name = String()
columns = Either(ColumnLabel, List(ColumnLabel))
def add_renderer(self, renderer):
self.renderers.append(renderer)
def apply(self, renderers=None):
if len(self.renderers) == 0:
self.renderers = renderers
if len(self.renderers) > 0:
# the first renderer's operation method is applied to the rest
getattr(self.renderers[0], self.method_name)(self.renderers)
else:
raise AttributeError('%s must be applied to available renderers, none found.' %
self.__class__.__name__)
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
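# Usage sketch (assumed; matplotlib is not imported by this module):
#   plt.imshow(scale_face(faces[0]), cmap='gray')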
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representaion
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
    collected on the internet; all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
        correlation with the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
        Labels associated with each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
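    Examples
    --------
    A minimal usage sketch (the parameter values are illustrative; the call
    assumes the archive is already cached locally or can be downloaded)::

        lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
        print(lfw.images.shape)    # (1288, 50, 37) with these parameters
        print(lfw.target_names)    # names of the people retained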
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
    collected on the internet; all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
        correlation with the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
        Labels associated with each pair of images: 0 for different persons,
        1 for the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
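    Examples
    --------
    A minimal usage sketch (assumes the metadata files and archive are
    cached locally or can be downloaded)::

        pairs = fetch_lfw_pairs(subset='train')
        print(pairs.pairs.shape)   # (2200, 2, 62, 47) with the defaults
        print(pairs.target[:5])    # 1 for 'same person', 0 otherwise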
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
jrmontag/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
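# A minimal usage sketch with synthetic data (illustrative only):
#
#   p = np.random.uniform(size=500)
#   y = np.random.binomial(1, p)
#   separation_plot(p, y)
#   plt.show()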
| mit |
yavalvas/yav_com | build/matplotlib/examples/pylab_examples/finance_work2.py | 10 | 6268 | import datetime
import numpy as np
import matplotlib.colors as colors
import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
startdate = datetime.date(2006,1,1)
today = enddate = datetime.date.today()
ticker = 'SPY'
fh = finance.fetch_historical_yahoo(ticker, startdate, enddate)
# a numpy record array with fields: date, open, high, low, close, volume, adj_close
r = mlab.csv2rec(fh); fh.close()
r.sort()
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
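# Illustrative check of the warm-up handling above: with n=2 and
# x = np.arange(4.), the convolution yields [0., .5, 1.5, 2.5] and the first
# n values are back-filled with a[n], giving [1.5, 1.5, 1.5, 2.5].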
def relative_strength(prices, n=14):
"""
compute the n period relative strength indicator
http://stockcharts.com/school/doku.php?id=chart_school:glossary_r#relativestrengthindex
http://www.investopedia.com/terms/r/rsi.asp
"""
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
for i in range(n, len(prices)):
        delta = deltas[i-1]  # because np.diff output is one element shorter
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
return rsi
def moving_average_convergence(x, nslow=26, nfast=12):
"""
    compute the MACD (Moving Average Convergence/Divergence) using a fast and a slow exponential moving average
return value is emaslow, emafast, macd which are len(x) arrays
"""
emaslow = moving_average(x, nslow, type='exponential')
emafast = moving_average(x, nfast, type='exponential')
return emaslow, emafast, emafast - emaslow
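# Example (illustrative): a trading signal is commonly derived by comparing
# the MACD line against its own 9-period exponential average, as done below:
#   emaslow, emafast, macd = moving_average_convergence(prices)
#   signal = moving_average(macd, 9, type='exponential')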
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
textsize = 9
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axes background color
ax1 = fig.add_axes(rect1, axisbg=axescolor) #left, bottom, width, height
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax2t = ax2.twinx()
ax3 = fig.add_axes(rect3, axisbg=axescolor, sharex=ax1)
### plot the relative strength indicator
prices = r.adj_close
rsi = relative_strength(prices)
fillcolor = 'darkgoldenrod'
ax1.plot(r.date, rsi, color=fillcolor)
ax1.axhline(70, color=fillcolor)
ax1.axhline(30, color=fillcolor)
ax1.fill_between(r.date, rsi, 70, where=(rsi>=70), facecolor=fillcolor, edgecolor=fillcolor)
ax1.fill_between(r.date, rsi, 30, where=(rsi<=30), facecolor=fillcolor, edgecolor=fillcolor)
ax1.text(0.6, 0.9, '>70 = overbought', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.text(0.6, 0.1, '<30 = oversold', transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30,70])
ax1.text(0.025, 0.95, 'RSI (14)', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.set_title('%s daily'%ticker)
### plot the price and volume data
dx = r.adj_close - r.close
low = r.low + dx
high = r.high + dx
deltas = np.zeros_like(prices)
deltas[1:] = np.diff(prices)
up = deltas>0
ax2.vlines(r.date[up], low[up], high[up], color='black', label='_nolegend_')
ax2.vlines(r.date[~up], low[~up], high[~up], color='black', label='_nolegend_')
ma20 = moving_average(prices, 20, type='simple')
ma200 = moving_average(prices, 200, type='simple')
linema20, = ax2.plot(r.date, ma20, color='blue', lw=2, label='MA (20)')
linema200, = ax2.plot(r.date, ma200, color='red', lw=2, label='MA (200)')
last = r[-1]
s = '%s O:%1.2f H:%1.2f L:%1.2f C:%1.2f, V:%1.1fM Chg:%+1.2f' % (
today.strftime('%d-%b-%Y'),
last.open, last.high,
last.low, last.close,
last.volume*1e-6,
last.close-last.open )
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='center left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
volume = (r.close*r.volume)/1e6 # dollar volume in millions
vmax = volume.max()
poly = ax2t.fill_between(r.date, volume, 0, label='Volume', facecolor=fillcolor, edgecolor=fillcolor)
ax2t.set_ylim(0, 5*vmax)
ax2t.set_yticks([])
### compute the MACD indicator
fillcolor = 'darkslategrey'
nslow = 26
nfast = 12
nema = 9
emaslow, emafast, macd = moving_average_convergence(prices, nslow=nslow, nfast=nfast)
ema9 = moving_average(macd, nema, type='exponential')
ax3.plot(r.date, macd, color='black', lw=2)
ax3.plot(r.date, ema9, color='blue', lw=1)
ax3.fill_between(r.date, macd-ema9, 0, alpha=0.5, facecolor=fillcolor, edgecolor=fillcolor)
ax3.text(0.025, 0.95, 'MACD (%d, %d, %d)'%(nfast, nslow, nema), va='top',
transform=ax3.transAxes, fontsize=textsize)
#ax3.set_yticks([])
# turn off upper axis tick labels, rotate the lower ones, etc
for ax in ax1, ax2, ax2t, ax3:
if ax!=ax3:
for label in ax.get_xticklabels():
label.set_visible(False)
else:
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_horizontalalignment('right')
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 5 ticks, pruning the upper and lower so they don't overlap
# with other ticks
#ax2.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
#ax3.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
ax2.yaxis.set_major_locator(MyLocator(5, prune='both'))
ax3.yaxis.set_major_locator(MyLocator(5, prune='both'))
plt.show()
| mit |
yarikoptic/pystatsmodels | statsmodels/datasets/macrodata/data.py | 3 | 3078 | """United States Macroeconomic data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St.
Louis [1] except the unemployment rate, which was taken from the U.S.
Bureau of Labor Statistics [2]. ::
[1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of
St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15,
2009.
[2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor;
http://www.bls.gov/data/; accessed December 15, 2009.
"""
DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3"""
DESCRLONG = DESCRSHORT
NOTE = """
Number of Observations - 203
Number of Variables - 14
Variable name definitions::
year - 1959q1 - 2009q3
quarter - 1-4
realgdp - Real gross domestic product (Bil. of chained 2005 US$,
seasonally adjusted annual rate)
    realcons - Real personal consumption expenditures (Bil. of chained 2005
               US$, seasonally adjusted annual rate)
realinv - Real gross private domestic investment (Bil. of chained 2005
US$, seasonally adjusted annual rate)
realgovt - Real federal consumption expenditures & gross investment
(Bil. of chained 2005 US$, seasonally adjusted annual rate)
realdpi - Real private disposable income (Bil. of chained 2005
US$, seasonally adjusted annual rate)
cpi - End of the quarter consumer price index for all urban
consumers: all items (1982-84 = 100, seasonally adjusted).
m1 - End of the quarter M1 nominal money stock (Seasonally adjusted)
    tbilrate - Quarterly average of the monthly 3-month treasury bill:
               secondary market rate
unemp - Seasonally adjusted unemployment rate (%)
pop - End of the quarter total population: all ages incl. armed
forces over seas
infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)
realint - Real interest rate (tbilrate - infl)
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the US macro data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The macrodata Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
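# Example (illustrative):
#   data = load_pandas().data
#   data[['year', 'realgdp', 'infl']].head()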
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/macrodata.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
krez13/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are informative
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number
of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
sarahgrogan/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
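    Examples
    --------
    A minimal sketch (the values below are illustrative)::

        classes = [np.array([0, 1]), np.array([0, 1, 2])]
        probs = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
        random_matrix = random_choice_csc(10, classes, probs, random_state=0)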
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
| bsd-3-clause |
pandyag/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
idx = idx[::-1] # in ascending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
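    Example (illustrative):
        port = pos2pnl(price, position)
        port['total'].plot()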
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
        tc[(tc>-1) & (tc<0)] = -1  # costs under $1 are raised to the $1 minimum
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
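    Example (illustrative):
        priceDelta, exitBar = tradeBracket(price, entryBar=10,
                                           maxTradeLength=20, bracket=0.5)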
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
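    Example (illustrative):
        beta = estimateBeta(stockPrice, marketPrice, algo='standard')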
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
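    Example (illustrative):
        vol = estimateVolatility(ohlc, N=20, algo='CC')  # close-close, % annualized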
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
    drawdown : vector of drawdown values
duration : vector of drawdown duration
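    Example (illustrative):
        dd, ddur = drawdown(pd.Series([1., 2., 1.5, 3.]))
        # dd.max() == 0.5, the dip after the peak at 2.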
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df) | bsd-3-clause |
delmarrerikaine/dmc-2017 | src/func.py | 1 | 4698 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import itertools as it
def toCategorical(df):
"""
    This function changes object datatypes in a pandas.DataFrame into category datatypes.
Parameters
----------
df : pandas.DataFrame with train or test DataFrame.
Returns
-------
df : pandas.DataFrame with new datatypes.
"""
columns=['availability','group','content','unit','pharmForm',
'campaignIndex','salesIndex', 'category', 'manufacturer']
for col in columns:
if col in df.columns:
df[col]=df[col].astype('category')
return df
def solveNA(df,df2,coef,flag):
"""
This function fills some missing data in pandas.DataFrame.
Parameters
----------
df : pandas.DataFrame with train or test DataFrame.
    df2 : pandas.DataFrame with train or test data (same as df when working
        on the full table rather than just the items).
    coef : mean ratio between competitorPrice and rrp, used to impute
        missing competitor prices.
    flag : selects which imputation variant to use.
Returns
-------
df : pandas.DataFrame with solved some NA.
"""
if flag==1:
df['pharmForm'] = df['pharmForm'].fillna('no_pharmForm')
df['category'] = df['category'].fillna(410)
df['campaignIndex'] = df['campaignIndex'].fillna('D')
elif flag==2:
df['competitorPrice'] = df['competitorPrice'].fillna(df2['rrp']*coef)
df['pharmForm'] = df['pharmForm'].fillna('no_pharmForm')
df['category'] = df['category'].fillna(410)
df['campaignIndex'] = df['campaignIndex'].fillna('D')
else:
df['competitorPrice'] = df['competitorPrice'].fillna(df2['rrp']*coef)
if 'pharmForm' in df.columns:
df['pharmForm'] = df['pharmForm'].cat.add_categories(['no_pharmForm'])
df['pharmForm'] = df['pharmForm'].fillna('no_pharmForm')
if 'category' in df.columns:
df['category'] = df['category'].cat.add_categories([410])
df['category'] = df['category'].fillna(410)
if 'campaignIndex' in df.columns:
df['campaignIndex'] = df['campaignIndex'].cat.add_categories(['D'])
df['campaignIndex'] = df['campaignIndex'].fillna('D')
columns2=['category', 'manufacturer']
for col2 in columns2:
if col2 in df.columns:
df[col2]=df[col2].astype('int')
df[col2]=df[col2].astype('category')
return df
def Dummies(df):
"""
    This function creates new columns in a pandas.DataFrame using existing
    columns with category datatype as input and pd.get_dummies for creation.
Parameters
----------
df : pandas.DataFrame with train or test data.
Returns
-------
df : pandas.DataFrame with new float64 columns.
"""
columns=['availability','unit','salesIndex','campaignIndex']
dumm=pd.get_dummies(df[columns])
df=pd.concat([df, dumm], axis=1)
return df
def solveCategorical(c1,df1,df2,flag):
"""
    This function creates new columns in the items DataFrame using existing
    columns with category datatype and high cardinality as input.
Parameters
----------
c1 : string with name of column which we try to describe
df1 : pandas.DataFrame with train data.
df2 : pandas.DataFrame with items.
    flag : if 1, also generate the count of examples behind each mean
Returns
-------
df2 : pandas.DataFrame with new float64 columns.
"""
columns=['group','content','pharmForm','category','manufacturer']
for L in range(1, 4):
for col in it.combinations(columns, L):
t1=df1.groupby(list(col))
t2=t1[c1].mean()
str1='_'.join(col)
t2 = t2.reset_index()
t2.rename(columns = {c1:c1+'_'+str1+'_mean'}, inplace = True)
df2 = df2.merge(t2, on=list(col), how='left')
if flag==1:
t2=t1[c1].count()
t2 = t2.reset_index()
t2[c1]=t2[c1]/2756003.0
t2.rename(columns = {c1:c1+'_'+str1+'_count'}, inplace = True)
df2 = df2.merge(t2, on=list(col), how='left')
return df2
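# Example (illustrative; 'order' stands in for a hypothetical target column):
#   items = solveCategorical('order', train, items, flag=1)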
def moreFeautures(df):
"""
    This function creates new columns in a pandas.DataFrame using existing columns.
Parameters
----------
df : pandas.DataFrame with train or test data.
Returns
-------
df : pandas.DataFrame with new float64 columns.
"""
df['day_of_week']=df['day']%7
df['discount']=df['price']/df['rrp']
df['compDiscount']=df['competitorPrice']/df['price']
return df
| mit |
acislab/HuMaIN_Microservices | OCRopyServices/SegmentationService/ocrolib/psegutils.py | 6 | 7677 | from __future__ import print_function
from toplevel import *
from pylab import *
from scipy.ndimage import filters,interpolation
import sl,morph
def B(a):
if a.dtype==dtype('B'): return a
return array(a,'B')
class record:
def __init__(self,**kw): self.__dict__.update(kw)
def blackout_images(image,ticlass):
"""Takes a page image and a ticlass text/image classification image and replaces
all regions tagged as 'image' with rectangles in the page image. The page image
is modified in place. All images are iulib arrays."""
rgb = ocropy.intarray()
ticlass.textImageProbabilities(rgb,image)
r = ocropy.bytearray()
g = ocropy.bytearray()
b = ocropy.bytearray()
ocropy.unpack_rgb(r,g,b,rgb)
components = ocropy.intarray()
components.copy(g)
n = ocropy.label_components(components)
print("[note] number of image regions", n)
tirects = ocropy.rectarray()
ocropy.bounding_boxes(tirects,components)
for i in range(1,tirects.length()):
r = tirects.at(i)
ocropy.fill_rect(image,r,0)
r.pad_by(-5,-5)
ocropy.fill_rect(image,r,255)
def binary_objects(binary):
labels,n = morph.label(binary)
objects = morph.find_objects(labels)
return objects
def estimate_scale(binary):
objects = binary_objects(binary)
bysize = sorted(objects,key=sl.area)
scalemap = zeros(binary.shape)
for o in bysize:
if amax(scalemap[o])>0: continue
scalemap[o] = sl.area(o)**0.5
scale = median(scalemap[(scalemap>3)&(scalemap<100)])
return scale
def compute_boxmap(binary,scale,threshold=(.5,4),dtype='i'):
objects = binary_objects(binary)
bysize = sorted(objects,key=sl.area)
boxmap = zeros(binary.shape,dtype)
for o in bysize:
if sl.area(o)**.5<threshold[0]*scale: continue
if sl.area(o)**.5>threshold[1]*scale: continue
boxmap[o] = 1
return boxmap
def compute_lines(segmentation,scale):
"""Given a line segmentation map, computes a list
    of line records, each holding a label, 2D bounds, and a mask."""
lobjects = morph.find_objects(segmentation)
lines = []
for i,o in enumerate(lobjects):
if o is None: continue
if sl.dim1(o)<2*scale or sl.dim0(o)<scale: continue
mask = (segmentation[o]==i+1)
if amax(mask)==0: continue
result = record()
result.label = i+1
result.bounds = o
result.mask = mask
lines.append(result)
return lines
def pad_image(image,d,cval=inf):
result = ones(array(image.shape)+2*d)
result[:,:] = amax(image) if cval==inf else cval
result[d:-d,d:-d] = image
return result
@checks(ARANK(2),int,int,int,int,mode=str,cval=True,_=GRAYSCALE)
def extract(image,y0,x0,y1,x1,mode='nearest',cval=0):
h,w = image.shape
ch,cw = y1-y0,x1-x0
y,x = clip(y0,0,max(h-ch,0)),clip(x0,0,max(w-cw, 0))
sub = image[y:y+ch,x:x+cw]
# print("extract", image.dtype, image.shape)
try:
r = interpolation.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
if cw > w or ch > h:
pady0, padx0 = max(-y0, 0), max(-x0, 0)
r = interpolation.affine_transform(r, eye(2), offset=(pady0, padx0), cval=1, output_shape=(ch, cw))
return r
except RuntimeError:
# workaround for platform differences between 32bit and 64bit
# scipy.ndimage
dtype = sub.dtype
sub = array(sub,dtype='float64')
sub = interpolation.shift(sub,(y-y0,x-x0),mode=mode,cval=cval,order=0)
sub = array(sub,dtype=dtype)
return sub
@checks(ARANK(2),True,pad=int,expand=int,_=GRAYSCALE)
def extract_masked(image,linedesc,pad=5,expand=0):
"""Extract a subimage from the image using the line descriptor.
A line descriptor consists of bounds and a mask."""
y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \
linedesc.bounds[0].stop,linedesc.bounds[1].stop]]
if pad>0:
mask = pad_image(linedesc.mask,pad,cval=0)
else:
mask = linedesc.mask
line = extract(image,y0-pad,x0-pad,y1+pad,x1+pad)
if expand>0:
mask = filters.maximum_filter(mask,(expand,expand))
line = where(mask,line,amax(line))
return line
def reading_order(lines,highlight=None,debug=0):
"""Given the list of lines (a list of 2D slices), computes
the partial reading order. The output is a binary 2D array
such that order[i,j] is true if line i comes before line j
in reading order."""
order = zeros((len(lines),len(lines)),'B')
def x_overlaps(u,v):
return u[1].start<v[1].stop and u[1].stop>v[1].start
def above(u,v):
return u[0].start<v[0].start
def left_of(u,v):
return u[1].stop<v[1].start
def separates(w,u,v):
if w[0].stop<min(u[0].start,v[0].start): return 0
if w[0].start>max(u[0].stop,v[0].stop): return 0
if w[1].start<u[1].stop and w[1].stop>v[1].start: return 1
if highlight is not None:
clf(); title("highlight"); imshow(binary); ginput(1,debug)
for i,u in enumerate(lines):
for j,v in enumerate(lines):
if x_overlaps(u,v):
if above(u,v):
order[i,j] = 1
else:
if [w for w in lines if separates(w,u,v)]==[]:
if left_of(u,v): order[i,j] = 1
if j==highlight and order[i,j]:
print((i, j), end=' ')
y0,x0 = sl.center(lines[i])
y1,x1 = sl.center(lines[j])
plot([x0,x1+200],[y0,y1])
if highlight is not None:
print()
ginput(1,debug)
return order
def topsort(order):
"""Given a binary array defining a partial order (o[i,j]==True means i<j),
compute a topological sort. This is a quick and dirty implementation
that works for up to a few thousand elements."""
n = len(order)
visited = zeros(n)
L = []
def visit(k):
if visited[k]: return
visited[k] = 1
for l in find(order[:,k]):
visit(l)
L.append(k)
for k in range(n):
visit(k)
return L #[::-1]
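# Example (illustrative): combining the two helpers above gives line indices
# in reading order:
#   order = reading_order([l.bounds for l in lines])
#   lsort = topsort(order)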
def show_lines(image,lines,lsort):
"""Overlays the computed lines on top of the image, for debugging
purposes."""
ys,xs = [],[]
clf(); cla()
imshow(image)
for i in range(len(lines)):
l = lines[lsort[i]]
y,x = sl.center(l.bounds)
xs.append(x)
ys.append(y)
o = l.bounds
r = matplotlib.patches.Rectangle((o[1].start,o[0].start),edgecolor='r',fill=0,width=sl.dim1(o),height=sl.dim0(o))
gca().add_patch(r)
h,w = image.shape
ylim(h,0); xlim(0,w)
plot(xs,ys)
@obsolete
def read_gray(fname):
image = imread(fname)
if image.ndim==3: image = mean(image,2)
return image
@obsolete
def read_binary(fname):
image = imread(fname)
if image.ndim==3: image = mean(image,2)
image -= amin(image)
image /= amax(image)
assert sum(image<0.01)+sum(image>0.99)>0.99*prod(image.shape),"input image is not binary"
binary = 1.0*(image<0.5)
return binary
@obsolete
def rgbshow(r,g,b=None,gn=1,cn=0,ab=0,**kw):
"""Small function to display 2 or 3 images as RGB channels."""
if b is None: b = zeros(r.shape)
combo = transpose(array([r,g,b]),axes=[1,2,0])
if cn:
for i in range(3):
combo[:,:,i] /= max(abs(amin(combo[:,:,i])),abs(amax(combo[:,:,i])))
elif gn:
combo /= max(abs(amin(combo)),abs(amax(combo)))
if ab:
combo = abs(combo)
if amin(combo)<0: print("warning: values less than zero")
imshow(clip(combo,0,1),**kw)
| apache-2.0 |
HKUST-SING/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
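  # Note (added): pandas_input_fn feeds batches through an input queue, so
  # queue runners must be started before evaluating the tensors and joined
  # afterwards; callInputFnOnce above wraps that boilerplate for a single run.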
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
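      # batch_size (128) exceeds the 2-row frame, so both requested epochs
      # arrive in a single batch of 4 elements (the frame repeated twice).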
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
      # The frame has 4 rows, so with batch_size=3 the epoch yields two
      # batches (3 rows, then the single remaining row).
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
piti118/probfit | probfit/test/testfunctor.py | 1 | 6784 | import matplotlib
matplotlib.use('Agg', warn=False)
from nose.tools import assert_equal, assert_almost_equal
import numpy as np
from probfit import (describe, rename, Convolve, Normalized,
Extended, AddPdf, AddPdfNorm, BlindFunc)
from probfit.pdf import gaussian, ugaussian
from probfit._libstat import integrate1d
from probfit.decorator import extended, normalized
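# Overview (added, hedged): the functors under test wrap plain Python pdfs
# and rewrite their argument lists, e.g.
#   pdf = Extended(Normalized(ugaussian, (-1, 1)))
#   describe(pdf)  # -> ['x', 'mean', 'sigma', 'N']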
def test_describe_normal_function():
def f(x, y, z):
return x + y + z
d = describe(f)
assert_equal(list(d), ['x', 'y', 'z'])
def test_Normalized():
f = ugaussian
g = Normalized(f, (-1, 1))
norm = integrate1d(f, (-1., 1.), 1000, (0., 1.))
assert_almost_equal(g(1., 0., 1.), f(1., 0., 1.) / norm)
def test_normalized_decorator():
@normalized((-1, 1))
def f(x, mean, sigma):
return ugaussian(x, mean, sigma)
g = Normalized(ugaussian, (-1, 1))
assert_equal(describe(f), ['x', 'mean', 'sigma'])
assert_almost_equal(g(1, 0, 1), f(1, 0, 1))
def test_Normalized_cache_hit():
    def f(x, y, z): return 1. * (x + y + z)
    def g(x, y, z): return 1. * (x + y + 2 * z)
nf = Normalized(f, (-10., 10.))
ng = Normalized(g, (-10., 10.))
assert_equal(nf.hit, 0)
nf(1., 2., 3.)
ng(1., 2., 3.)
assert_equal(nf.hit, 0)
nf(3., 2., 3.)
assert_equal(nf.hit, 1)
ng(1., 2., 3.)
assert_equal(ng.hit, 1)
def test_add_pdf():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def h(x, c, a): return 3 * (x + c + a)
A = AddPdf(f, g, h)
assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
ret = A(1, 2, 3, 4, 5, 6, 7)
    expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)  # h reuses the shared parameter a=4
assert_almost_equal(ret, expected)
# wrong integral on purpose
f.integrate = lambda bound, nint, y, z : 1. # unbound method works too
g.integrate = lambda bound, nint, a, b : 2.
h.integrate = lambda bound, nint, c, a : 3.
assert_equal(integrate1d(A, (-10., 10.), 100, (1., 2., 3., 4., 5.)), 6.)
def test_add_pdf_factor():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def k1(n1, n2): return 3 * (n1 + n2)
def k2(n1, y): return 4 * (n1 + y)
A = AddPdf(f, g, prefix=['f', 'g'], factors=[k1, k2])
assert_equal(tuple(describe(A)), ('x', 'fy', 'fz', 'ga', 'gb', 'fn1', 'fn2', 'gn1', 'gy'))
ret = A(1, 2, 3, 4, 5, 6, 7, 8, 9)
expected = k1(6, 7) * f(1, 2, 3) + k2(8, 9) * g(1, 4, 5)
assert_almost_equal(ret, expected)
parts = A.eval_parts(1, 2, 3, 4, 5, 6, 7, 8, 9)
assert_almost_equal(parts[0], k1(6, 7) * f(1, 2, 3))
assert_almost_equal(parts[1], k2(8, 9) * g(1, 4, 5))
def test_add_pdf_cache():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def h(x, c, a): return 3 * (x + c + a)
A = AddPdf(f, g, h)
assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
ret = A(1, 2, 3, 4, 5, 6, 7)
assert_equal(A.hit, 0)
expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)
assert_almost_equal(ret, expected)
ret = A(1, 2, 3, 6, 7, 8, 9)
assert_equal(A.hit, 1)
expected = f(1, 2, 3) + g(1, 6, 7) + h(1, 8, 6)
assert_almost_equal(ret, expected)
def test_extended():
def f(x, y, z): return x + 2 * y + 3 * z
g = Extended(f)
assert_equal(tuple(describe(g)), ('x', 'y', 'z', 'N'))
assert_equal(g(1, 2, 3, 4), 4 * (f(1, 2, 3)))
# extended should use analytical when available
def ana_int(x, y): return y * x ** 2
ana_int_int = lambda b, n, y: 999. # wrong on purpose
ana_int.integrate = ana_int_int
g = Extended(ana_int)
assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), 999.*2.)
# and not fail when it's not available
def no_ana_int(x, y): return y * x ** 2
g = Extended(no_ana_int)
assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), (1.**3) / 3.*5.*2.)
def test_extended_decorator():
def f(x, y, z): return x + 2 * y + 3 * z
@extended()
def g(x, y, z):
return x + 2 * y + 3 * z
assert_equal(tuple(describe(g)), ('x', 'y', 'z', 'N'))
assert_equal(g(1, 2, 3, 4), 4 * (f(1, 2, 3)))
def test_addpdfnorm():
def f(x, y, z): return x + 2 * y + 3 * z
    def g(x, z, p): return 4 * x + 5 * z + 6 * z  # p is only in the signature so it appears in describe()
def p(x, y, q): return 7 * x + 8 * y + 9 * q
h = AddPdfNorm(f, g)
assert_equal(describe(h), ['x', 'y', 'z', 'p', 'f_0'])
q = AddPdfNorm(f, g, p)
assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
assert_almost_equal(h(1, 2, 3, 4, 0.1),
0.1 * f(1, 2, 3) + 0.9 * g(1, 3, 4))
assert_almost_equal(q(1, 2, 3, 4, 5, 0.1, 0.2),
0.1 * f(1, 2, 3) + 0.2 * g(1, 3, 4) + 0.7 * p(1, 2, 5))
def test_addpdfnorm_analytical_integrate():
def f(x, y, z): return x + 2 * y + 3 * z
def g(x, z, p): return 4 * x + 5 * z + 6 * z
def p(x, y, q): return 7 * x + 8 * y + 9 * q
f.integrate = lambda bound, nint, y, z: 1.
g.integrate = lambda bound, nint, z, p: 2.
p.integrate = lambda bound, nint, y, q: 3.
q = AddPdfNorm(f, g, p)
assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
integral = integrate1d(q, (-10., 10.), 100, (1., 2., 3., 4., 0.1, 0.2))
assert_almost_equal(integral, 0.1 * 1. + 0.2 * 2. + 0.7 * 3.)
def test_convolution():
f = gaussian
    g = lambda x, mu1, sigma1: gaussian(x, mu1, sigma1)
h = Convolve(f, g, (-10, 10), nbins=10000)
assert_equal(describe(h), ['x', 'mean', 'sigma', 'mu1', 'sigma1'])
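    # Sanity check: N(0,1) convolved with N(1,2) is N(1, sqrt(5)); its peak
    # density 1/sqrt(2*pi*5) ~ 0.1784 matches the center value asserted below.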
assert_almost_equal(h(1, 0, 1, 1, 2), 0.17839457037411527) # center
assert_almost_equal(h(-1, 0, 1, 1, 2), 0.119581456625684) # left
assert_almost_equal(h(0, 0, 1, 1, 2), 0.1614180824489487) # left
assert_almost_equal(h(2, 0, 1, 1, 2), 0.1614180824489487) # right
assert_almost_equal(h(3, 0, 1, 1, 2), 0.119581456625684) # right
def test_rename():
def f(x, y, z):
return None
assert_equal(describe(f), ['x', 'y', 'z'])
g = rename(f, ['x', 'a', 'b'])
assert_equal(describe(g), ['x', 'a', 'b'])
def test_blindfunc():
np.random.seed(0)
f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
arg = f.__shift_arg__((1, 1, 1))
totest = [1., -1.1665264284482637, 1.]
assert_almost_equal(arg[0], totest[0])
assert_almost_equal(arg[1], totest[1])
assert_almost_equal(arg[2], totest[2])
assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
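    # The blinding offset should be derived only from the string 'abcd', not
    # from numpy's global RNG state, so reseeding must reproduce the same
    # shifted arguments and function values below.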
np.random.seed(575345)
f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
arg = f.__shift_arg__((1, 1, 1))
assert_almost_equal(arg[0], totest[0])
assert_almost_equal(arg[1], totest[1])
assert_almost_equal(arg[2], totest[2])
assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
| mit |